Jan 06 08:14:57 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 06 08:14:57 crc restorecon[4762]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 06 08:14:57 crc restorecon[4762]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc 
restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 06 08:14:57 crc 
restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 
08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 06 08:14:57 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 06 08:14:57 crc restorecon[4762]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
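[Editorial sketch] The restorecon pass that dominates the log above is comparing each file's on-disk SELinux context (e.g. system_u:object_r:container_file_t:s0:c7,c13) against the policy default, and leaving paths alone when an admin customized them. As a minimal, illustrative sketch only (not restorecon's actual implementation; assumes Linux and the golang.org/x/sys/unix package), the context such tooling inspects lives in the security.selinux extended attribute and can be read like this:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// selinuxLabel reads the security.selinux extended attribute, which is
// where the on-disk SELinux context of a file is stored on Linux.
func selinuxLabel(path string) (string, error) {
	buf := make([]byte, 1024)
	n, err := unix.Getxattr(path, "security.selinux", buf)
	if err != nil {
		return "", err
	}
	// The stored value is NUL-terminated; strip the terminator.
	for n > 0 && buf[n-1] == 0 {
		n--
	}
	return string(buf[:n]), nil
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: selinux-label <path>")
		os.Exit(2)
	}
	label, err := selinuxLabel(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(label) // e.g. system_u:object_r:container_file_t:s0:c7,c13
}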
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 06 08:14:58 crc kubenswrapper[4784]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.123637 4784 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127405 4784 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127428 4784 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127434 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127439 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127445 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127450 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127456 4784 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127462 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127467 4784 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127473 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127478 4784 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127484 4784 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127489 4784 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127494 4784 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127500 4784 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127507 4784 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127514 4784 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127519 4784 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127525 4784 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127530 4784 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127535 4784 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127541 4784 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127562 4784 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127568 4784 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127573 4784 feature_gate.go:330] unrecognized feature gate: Example
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127578 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127582 4784 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127587 4784 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127592 4784 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127596 4784 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127602 4784 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127606 4784 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127611 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127616 4784 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127621 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127625 4784 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127630 4784 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127635 4784 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127640 4784 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127645 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127650 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127655 4784 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127660 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127666 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127671 4784 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127676 4784 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127680 4784 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127687 4784 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127693 4784 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127699 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127705 4784 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127711 4784 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127716 4784 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127723 4784 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127731 4784 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127736 4784 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127741 4784 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127746 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127752 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127756 4784 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127761 4784 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127766 4784 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127771 4784 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127776 4784 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127782 4784 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127787 4784 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127793 4784 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
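A few entries below, the kubelet dumps every registered flag with its effective value (the long flags.go:64 run). Kubernetes registers its flags through spf13/pflag; the stdlib flag package supports the same walk-all-flags pattern, sketched here with two hypothetical flags standing in for the real set of hundreds.

package main

import (
	"flag"
	"log"
)

func main() {
	// Representative flags only; names and defaults are taken from this log.
	flag.String("kubeconfig", "/var/lib/kubelet/kubeconfig", "path to kubeconfig file")
	flag.Int("max-pods", 110, "maximum pods per node")
	flag.Parse()

	// Mirrors the FLAG: --name="value" lines: visit every registered flag,
	// whether or not it was set on the command line, and log its value.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
	})
}

Logging the full flag table at startup is what makes a capture like this one self-describing: the effective configuration can be read straight out of the journal without access to the unit file.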
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127799 4784 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127804 4784 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127809 4784 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.127814 4784 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127906 4784 flags.go:64] FLAG: --address="0.0.0.0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127917 4784 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127926 4784 flags.go:64] FLAG: --anonymous-auth="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127934 4784 flags.go:64] FLAG: --application-metrics-count-limit="100" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127941 4784 flags.go:64] FLAG: --authentication-token-webhook="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127946 4784 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127954 4784 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127961 4784 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127967 4784 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127972 4784 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127979 4784 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127985 4784 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127990 4784 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.127996 4784 flags.go:64] FLAG: --cgroup-root="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128001 4784 flags.go:64] FLAG: --cgroups-per-qos="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128008 4784 flags.go:64] FLAG: --client-ca-file="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128014 4784 flags.go:64] FLAG: --cloud-config="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128021 4784 flags.go:64] FLAG: --cloud-provider="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128027 4784 flags.go:64] FLAG: --cluster-dns="[]" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128035 4784 flags.go:64] FLAG: --cluster-domain="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128041 4784 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128047 4784 flags.go:64] FLAG: --config-dir="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128052 4784 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128059 4784 flags.go:64] FLAG: --container-log-max-files="5" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128066 4784 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 06 
08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128071 4784 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128077 4784 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128083 4784 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128088 4784 flags.go:64] FLAG: --contention-profiling="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128094 4784 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128100 4784 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128106 4784 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128112 4784 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128118 4784 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128124 4784 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128130 4784 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128136 4784 flags.go:64] FLAG: --enable-load-reader="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128141 4784 flags.go:64] FLAG: --enable-server="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128147 4784 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128154 4784 flags.go:64] FLAG: --event-burst="100" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128160 4784 flags.go:64] FLAG: --event-qps="50" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128165 4784 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128171 4784 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128176 4784 flags.go:64] FLAG: --eviction-hard="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128184 4784 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128189 4784 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128194 4784 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128200 4784 flags.go:64] FLAG: --eviction-soft="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128205 4784 flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128211 4784 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128217 4784 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128222 4784 flags.go:64] FLAG: --experimental-mounter-path="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128227 4784 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128233 4784 flags.go:64] FLAG: --fail-swap-on="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128238 4784 flags.go:64] FLAG: 
--feature-gates="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128245 4784 flags.go:64] FLAG: --file-check-frequency="20s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128250 4784 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128256 4784 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128262 4784 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128268 4784 flags.go:64] FLAG: --healthz-port="10248" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128273 4784 flags.go:64] FLAG: --help="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128279 4784 flags.go:64] FLAG: --hostname-override="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128284 4784 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128290 4784 flags.go:64] FLAG: --http-check-frequency="20s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128296 4784 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128301 4784 flags.go:64] FLAG: --image-credential-provider-config="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128307 4784 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128317 4784 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128324 4784 flags.go:64] FLAG: --image-service-endpoint="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128330 4784 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128336 4784 flags.go:64] FLAG: --kube-api-burst="100" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128342 4784 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128348 4784 flags.go:64] FLAG: --kube-api-qps="50" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128354 4784 flags.go:64] FLAG: --kube-reserved="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128360 4784 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128365 4784 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128371 4784 flags.go:64] FLAG: --kubelet-cgroups="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128377 4784 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128382 4784 flags.go:64] FLAG: --lock-file="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128387 4784 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128393 4784 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128399 4784 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128407 4784 flags.go:64] FLAG: --log-json-split-stream="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128412 4784 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128418 4784 flags.go:64] FLAG: --log-text-split-stream="false" Jan 06 
08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128423 4784 flags.go:64] FLAG: --logging-format="text" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128429 4784 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128435 4784 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128440 4784 flags.go:64] FLAG: --manifest-url="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128446 4784 flags.go:64] FLAG: --manifest-url-header="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128453 4784 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128459 4784 flags.go:64] FLAG: --max-open-files="1000000" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128465 4784 flags.go:64] FLAG: --max-pods="110" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128471 4784 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128476 4784 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128482 4784 flags.go:64] FLAG: --memory-manager-policy="None" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128488 4784 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128494 4784 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128499 4784 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128505 4784 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128517 4784 flags.go:64] FLAG: --node-status-max-images="50" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128523 4784 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128529 4784 flags.go:64] FLAG: --oom-score-adj="-999" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128535 4784 flags.go:64] FLAG: --pod-cidr="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128556 4784 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128565 4784 flags.go:64] FLAG: --pod-manifest-path="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128570 4784 flags.go:64] FLAG: --pod-max-pids="-1" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128576 4784 flags.go:64] FLAG: --pods-per-core="0" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128582 4784 flags.go:64] FLAG: --port="10250" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128587 4784 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128593 4784 flags.go:64] FLAG: --provider-id="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128598 4784 flags.go:64] FLAG: --qos-reserved="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128604 4784 flags.go:64] FLAG: --read-only-port="10255" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128610 4784 flags.go:64] FLAG: --register-node="true" Jan 06 08:14:58 crc 
kubenswrapper[4784]: I0106 08:14:58.128615 4784 flags.go:64] FLAG: --register-schedulable="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128620 4784 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128638 4784 flags.go:64] FLAG: --registry-burst="10" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128643 4784 flags.go:64] FLAG: --registry-qps="5" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128649 4784 flags.go:64] FLAG: --reserved-cpus="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128654 4784 flags.go:64] FLAG: --reserved-memory="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128661 4784 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128667 4784 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128673 4784 flags.go:64] FLAG: --rotate-certificates="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128678 4784 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128684 4784 flags.go:64] FLAG: --runonce="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128689 4784 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128699 4784 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128705 4784 flags.go:64] FLAG: --seccomp-default="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128710 4784 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128715 4784 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128721 4784 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128727 4784 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128733 4784 flags.go:64] FLAG: --storage-driver-password="root" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128739 4784 flags.go:64] FLAG: --storage-driver-secure="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128744 4784 flags.go:64] FLAG: --storage-driver-table="stats" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128750 4784 flags.go:64] FLAG: --storage-driver-user="root" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128755 4784 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128760 4784 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128767 4784 flags.go:64] FLAG: --system-cgroups="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128773 4784 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128781 4784 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128787 4784 flags.go:64] FLAG: --tls-cert-file="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128792 4784 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128800 4784 flags.go:64] FLAG: --tls-min-version="" Jan 06 08:14:58 
crc kubenswrapper[4784]: I0106 08:14:58.128805 4784 flags.go:64] FLAG: --tls-private-key-file="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128811 4784 flags.go:64] FLAG: --topology-manager-policy="none" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128816 4784 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128822 4784 flags.go:64] FLAG: --topology-manager-scope="container" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128827 4784 flags.go:64] FLAG: --v="2" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128834 4784 flags.go:64] FLAG: --version="false" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128841 4784 flags.go:64] FLAG: --vmodule="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128848 4784 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.128854 4784 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.128987 4784 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.128993 4784 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.128998 4784 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129006 4784 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129013 4784 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129021 4784 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129027 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129034 4784 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129039 4784 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129045 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129050 4784 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129056 4784 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129061 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129066 4784 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129071 4784 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129076 4784 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129081 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129086 4784 feature_gate.go:330] 
unrecognized feature gate: NewOLM Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129091 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129097 4784 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129103 4784 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129109 4784 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129115 4784 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129120 4784 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129125 4784 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129129 4784 feature_gate.go:330] unrecognized feature gate: Example Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129134 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129139 4784 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129144 4784 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129149 4784 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129154 4784 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129158 4784 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129163 4784 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129168 4784 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129173 4784 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129178 4784 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129182 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129190 4784 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129196 4784 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129202 4784 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129207 4784 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129213 4784 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129218 4784 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129223 4784 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129229 4784 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129234 4784 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129239 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129244 4784 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129249 4784 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129253 4784 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129258 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129263 4784 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129269 4784 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
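The repeating blocks of feature_gate.go warnings above are separate passes over the same cluster-wide gate list: names the embedded Kubernetes gate registry does not know (OpenShift-specific gates such as NewOLM or RouteAdvertisements) are warned about and skipped, known GA or deprecated gates produce the "will be removed" notes, and each pass ends with the effective map logged at feature_gate.go:386, as just below. A simplified sketch of that lenient merge, with a hypothetical three-gate registry:

package main

import (
	"fmt"
	"log"
)

type stability int

const (
	alpha stability = iota
	ga
	deprecated
)

func main() {
	// Hypothetical registry standing in for the kubelet's known gates.
	known := map[string]stability{
		"CloudDualStackNodeIPs": ga,
		"KMSv1":                 deprecated,
		"NodeSwap":              alpha,
	}
	// Incoming cluster-wide gate set, including names this binary doesn't know.
	requested := map[string]bool{
		"CloudDualStackNodeIPs": true,
		"KMSv1":                 true,
		"NodeSwap":              false,
		"GatewayAPI":            true, // OpenShift-only: unrecognized here
	}

	effective := map[string]bool{}
	for name, enabled := range requested {
		st, ok := known[name]
		if !ok {
			// Analogue of the feature_gate.go:330 warning: skip, don't fail.
			log.Printf("W unrecognized feature gate: %s", name)
			continue
		}
		switch st {
		case ga:
			log.Printf("W Setting GA feature gate %s=%v. It will be removed in a future release.", name, enabled)
		case deprecated:
			log.Printf("W Setting deprecated feature gate %s=%v. It will be removed in a future release.", name, enabled)
		}
		effective[name] = enabled
	}
	fmt.Printf("feature gates: %v\n", effective)
}

The important property is that unknown names are non-fatal: a component built against one Kubernetes release can be handed a newer or vendor-extended gate list and still start, which is why these warnings are noise rather than errors.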
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129275 4784 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129280 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129285 4784 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129289 4784 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129294 4784 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129300 4784 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129305 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129310 4784 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129316 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129321 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129326 4784 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129331 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129336 4784 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129340 4784 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129345 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129350 4784 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129357 4784 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.129361 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.129376 4784 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.140815 4784 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.140870 4784 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141048 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141063 4784 feature_gate.go:330] unrecognized feature gate: 
AutomatedEtcdBackup Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141075 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141085 4784 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141096 4784 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141105 4784 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141115 4784 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141124 4784 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141133 4784 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141142 4784 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141151 4784 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141160 4784 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141169 4784 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141178 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141187 4784 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141199 4784 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141209 4784 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141221 4784 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141229 4784 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141238 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141248 4784 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141259 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141268 4784 feature_gate.go:330] unrecognized feature gate: Example Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141276 4784 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141285 4784 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141295 4784 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141308 4784 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141322 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141334 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141344 4784 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141354 4784 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141365 4784 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141374 4784 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141384 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141393 4784 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141403 4784 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141413 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141422 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141432 4784 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141440 4784 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141450 4784 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141460 4784 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141469 4784 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141478 4784 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141487 4784 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141496 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141505 4784 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141514 4784 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141526 4784 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141537 4784 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141574 4784 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141584 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141594 4784 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141605 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141614 4784 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141624 4784 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141636 4784 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141651 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141662 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141673 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141683 4784 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141691 4784 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141701 4784 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141714 4784 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141725 4784 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141733 4784 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141742 4784 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141753 4784 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141762 4784 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141771 4784 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.141779 4784 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.141796 4784 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142265 4784 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142288 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142297 4784 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142307 4784 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142315 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142324 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142333 4784 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142348 4784 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142357 4784 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142367 4784 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142376 4784 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142385 4784 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142397 4784 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142408 4784 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142417 4784 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142427 4784 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142436 4784 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142446 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142455 4784 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142464 4784 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142474 4784 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142483 4784 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142492 4784 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142501 4784 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142511 4784 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142520 4784 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142530 4784 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142538 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142570 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142578 4784 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142587 4784 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142597 4784 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142606 4784 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142615 4784 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142624 4784 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142633 4784 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142643 4784 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142652 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142662 4784 feature_gate.go:330] unrecognized 
feature gate: VSphereControlPlaneMachineSet Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142671 4784 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142679 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142689 4784 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142698 4784 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142707 4784 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142717 4784 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142726 4784 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142735 4784 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142744 4784 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142757 4784 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142767 4784 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142777 4784 feature_gate.go:330] unrecognized feature gate: Example Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142786 4784 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142796 4784 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142807 4784 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142816 4784 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142825 4784 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142836 4784 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142846 4784 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142855 4784 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142865 4784 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142875 4784 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142888 4784 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142900 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142912 4784 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142924 4784 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142933 4784 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142942 4784 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142951 4784 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142961 4784 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142973 4784 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.142984 4784 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.142998 4784 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.143502 4784 server.go:940] "Client rotation is on, will bootstrap in background" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.149118 4784 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.149218 4784 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
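The client-certificate entries just below show the rotation schedule already computed at startup: the current certificate expires 2026-02-24, and the manager has picked a rotation deadline of 2025-12-08, well before expiry. A sketch of that scheduling policy, under the assumption that the deadline is drawn uniformly between 70% and 90% of the certificate's validity window (consistent with the logged dates if the certificate was issued one year before expiry; the real implementation lives in k8s.io/client-go/util/certificate):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a renewal time at a random point 70-90% of the way
// through the certificate's validity window, so a fleet of kubelets does not
// renew in lockstep. Assumed policy for this sketch, not the verbatim upstream code.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(float64(total) * frac))
}

func main() {
	// Hypothetical one-year client certificate ending at the expiry seen in the log.
	notBefore := time.Date(2025, time.February, 24, 5, 52, 8, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 52, 8, 0, time.UTC)
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}

Because the API server is refusing connections at this point (the CSR POST below fails with connection refused), the manager keeps using the loaded key pair and retries later; rotation is attempted here, not completed.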
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.149741 4784 server.go:997] "Starting client certificate rotation"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.149760 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.150082 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-08 15:20:44.941369785 +0000 UTC
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.150159 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.167444 4784 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.168344 4784 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.170580 4784 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.177891 4784 log.go:25] "Validated CRI v1 runtime API"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.193629 4784 log.go:25] "Validated CRI v1 image API"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.195862 4784 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.198903 4784 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-06-08-10-26-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.198972 4784 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.227898 4784 manager.go:217] Machine: {Timestamp:2026-01-06 08:14:58.225876251 +0000 UTC m=+0.272049138 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4602588f-a4e8-4d03-9d3c-a153f288ba5f BootID:f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:68:ae:cf Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:68:ae:cf Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:c0:c4:b5 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:7c:b1:a1 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d2:a6:a3 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:aa:ac:88 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:d9:36:da Speed:-1 Mtu:1496} {Name:eth10 MacAddress:fe:95:b8:af:3e:e2 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:16:d1:e7:46:20:93 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.228230 4784 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.228507 4784 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.235715 4784 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236003 4784 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236063 4784 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236359 4784 topology_manager.go:138] "Creating topology manager with none policy"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236377 4784 container_manager_linux.go:303] "Creating device plugin manager"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236708 4784 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.236763 4784 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.237362 4784 state_mem.go:36] "Initialized new in-memory state store"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.237503 4784 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.238518 4784 kubelet.go:418] "Attempting to sync node with API server"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.238606 4784 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.238643 4784 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.238662 4784 kubelet.go:324] "Adding apiserver pod source"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.238685 4784 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.240724 4784 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.240995 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.241115 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError"
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.240991 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.241187 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.247396 4784 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
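The nodeConfig dump above carries the kubelet's hard-eviction policy: each HardEvictionThreshold pairs a signal with a LessThan operator and either an absolute Quantity (memory.available under 100Mi) or a Percentage of capacity (nodefs.available under 10%, imagefs.available under 15%, inodesFree under 5%). A minimal Go sketch of that evaluation follows, with illustrative types rather than the kubelet's real ones.

package main

import "fmt"

// threshold is a hypothetical stand-in for one HardEvictionThreshold:
// exactly one of quantity (absolute bytes) or pct (fraction of capacity) is set.
type threshold struct {
	signal   string
	quantity int64
	pct      float64
}

// breached reports whether the observed availability falls below the limit.
// The operator is LessThan for every threshold in the log above.
func breached(t threshold, available, capacity int64) bool {
	limit := t.quantity
	if t.pct > 0 {
		limit = int64(t.pct * float64(capacity))
	}
	return available < limit
}

func main() {
	thresholds := []threshold{
		{signal: "memory.available", quantity: 100 << 20}, // 100Mi
		{signal: "nodefs.available", pct: 0.10},
		{signal: "imagefs.available", pct: 0.15},
	}
	memCapacity := int64(33654128640) // MemoryCapacity from the Machine line above
	memAvailable := int64(80 << 20)   // pretend only 80Mi is free
	for _, t := range thresholds {
		if t.signal == "memory.available" {
			fmt.Printf("%s breached: %v\n", t.signal, breached(t, memAvailable, memCapacity))
		}
	}
}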
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.248392 4784 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249055 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249086 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249097 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249107 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249122 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249132 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249152 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249167 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249179 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249190 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249204 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249215 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.249495 4784 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.251347 4784 server.go:1280] "Started kubelet"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.251635 4784 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.251904 4784 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.251902 4784 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.253616 4784 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 06 08:14:58 crc systemd[1]: Started Kubernetes Kubelet.
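The plugins.go:603 lines enumerate the in-tree volume plugins registered at startup. Each plugin is keyed by its probed name, and later volume operations (including the reconstruction entries further down) resolve a volume back to its plugin through that key. A toy registry sketch with hypothetical types, just to make the lookup shape concrete:

package main

import "fmt"

// volumePlugin is a hypothetical stand-in for the real plugin interface.
type volumePlugin interface{ Name() string }

type simplePlugin string

func (p simplePlugin) Name() string { return string(p) }

// registry maps plugin name -> plugin, mirroring the "Loaded volume plugin" lines.
type registry map[string]volumePlugin

func (r registry) load(p volumePlugin) {
	r[p.Name()] = p
	fmt.Printf("Loaded volume plugin %q\n", p.Name())
}

func main() {
	r := registry{}
	for _, n := range []string{
		"kubernetes.io/empty-dir",
		"kubernetes.io/secret",
		"kubernetes.io/projected",
		"kubernetes.io/csi",
	} {
		r.load(simplePlugin(n))
	}
	// Later lookups resolve a volume's plugin-qualified name back to a plugin.
	if _, ok := r["kubernetes.io/csi"]; ok {
		fmt.Println("found plugin for kubernetes.io/csi")
	}
}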
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.255447 4784 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.255500 4784 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.255617 4784 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 15:55:26.674643244 +0000 UTC
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.255917 4784 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.255929 4784 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.256042 4784 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.256209 4784 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.256826 4784 server.go:460] "Adding debug handlers to kubelet server"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.254900 4784 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.102:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1888163b6e39d92b default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-06 08:14:58.251086123 +0000 UTC m=+0.297258960,LastTimestamp:2026-01-06 08:14:58.251086123 +0000 UTC m=+0.297258960,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.258592 4784 factory.go:55] Registering systemd factory
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.258619 4784 factory.go:221] Registration of the systemd container factory successfully
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.261013 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="200ms"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.261167 4784 factory.go:153] Registering CRI-O factory
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.261178 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.261254 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.261319 4784 factory.go:221] Registration of the crio container factory successfully
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.261426 4784 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.261511 4784 factory.go:103] Registering Raw factory
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.261640 4784 manager.go:1196] Started watching for new ooms in manager
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.262334 4784 manager.go:319] Starting recovery of all containers
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.266049 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.266133 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.266844 4784 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.266871 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.266986 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267003 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267013 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267028 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267138 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267192 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267365 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267385 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267397 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267409 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267528 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267699 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267712 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267727 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267738 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267854 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267865 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267876 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.267887 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268004 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268051 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268161 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268183 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268199 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268211 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268319 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268336 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268379 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268497 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268510 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268522 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268534 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268600 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268614 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268686 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.268699 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269149 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269164 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269195 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269209 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269222 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269236 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269252 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269284 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269296 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269308 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269323 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269430 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269469 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.269488 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270227 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270262 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270288 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270305 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270320 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270338 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270353 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270368 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270381 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270400 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270414 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270426 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270439 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270452 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270464 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270476 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270489 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270504 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270519 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270531 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270590 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270603 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270616 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270629 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270641 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270655 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270669 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270681 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270693 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270706 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270717 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270730 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270741 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270752 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270769 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270781 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270793 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270836 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270854 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270868 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270881 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270893 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270909 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270924 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270939 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270953 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270965 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270977 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.270991 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271006 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271025 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271053 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271071 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271087 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271101 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271116 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271179 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271197 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271213 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271229 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271244 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271257 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271272 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271288 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271302 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271316 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271328 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271339 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271350 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271361 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271372 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271383 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271394 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271404 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271414 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271425 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271436 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271448 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271463 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271477 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271492 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271509 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271528 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271560 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271578 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271602 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271618 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254"
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271635 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271649 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271666 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271681 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271695 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271713 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271726 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271740 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271754 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271767 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271781 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" 
volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271796 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271810 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271824 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271837 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271850 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271865 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271877 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271889 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271905 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271919 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271934 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271952 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271966 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271981 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.271998 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272011 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272025 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272038 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272053 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272067 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272080 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272094 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272113 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272128 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272146 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272169 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272183 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272194 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272205 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272216 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272226 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272236 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272247 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272258 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272267 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272278 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272287 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272297 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272307 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272318 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272335 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272355 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272371 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272385 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272398 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272413 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272426 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272441 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272455 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272468 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272482 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272505 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272519 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272536 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272559 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272571 4784 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272581 4784 reconstruct.go:97] "Volume reconstruction finished" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.272591 4784 reconciler.go:26] "Reconciler: start to sync state" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.282060 4784 manager.go:324] Recovery completed Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.295595 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.297788 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.297835 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.297845 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.302086 4784 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.302117 4784 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.302152 4784 state_mem.go:36] "Initialized new in-memory state store" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.308764 4784 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.310719 4784 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.310768 4784 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.310789 4784 policy_none.go:49] "None policy: Start" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.310806 4784 kubelet.go:2335] "Starting kubelet main sync loop" Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.311025 4784 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.312672 4784 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.312706 4784 state_mem.go:35] "Initializing new in-memory state store" Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.312720 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.312802 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.356393 4784 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.387968 4784 manager.go:334] "Starting Device Plugin manager" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.388053 4784 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.388068 4784 server.go:79] "Starting device plugin registration server" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.388729 4784 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.388751 4784 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.390326 4784 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.390515 4784 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.390524 4784 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.402413 4784 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.412129 4784 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Jan 06 08:14:58 crc kubenswrapper[4784]: 
I0106 08:14:58.412212 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.413195 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.413263 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.413274 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.413566 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.413910 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.414005 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.414672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.414705 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.414717 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.414871 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415054 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415109 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415142 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415285 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415300 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415518 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415562 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415576 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415685 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415815 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.415845 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416642 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416673 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416686 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416800 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416813 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416829 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.416995 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417036 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417224 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417243 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417568 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417614 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417833 4784 util.go:30] "No sandbox for pod can be found. 
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.417863 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418859 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418928 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.418937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.462338 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="400ms"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476057 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476120 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476148 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476172 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476196 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476219 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476245 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476277 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476301 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476323 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476347 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476371 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476393 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476417 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.476437 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.489232 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.490972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.491027 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.491268 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.491311 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.492063 4784 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.102:6443: connect: connection refused" node="crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.577901 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578182 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578314 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578432 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578257 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578434 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578158 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578501 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578574 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578946 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.578885 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579049 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579112 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579116 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579167 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579188 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579204 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579200 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579219 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579236 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579251 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579327 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579372 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579419 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579466 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579501 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579496 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579618 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.579752 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.692413 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.694870 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.695257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.695631 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.695788 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.696609 4784 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.102:6443: connect: connection refused" node="crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.745361 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.765331 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.778195 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-0f5a5bbf26dbdd550895ceaa1e63f9a949a4bceac22c309d617719b49f9a54a6 WatchSource:0}: Error finding container 0f5a5bbf26dbdd550895ceaa1e63f9a949a4bceac22c309d617719b49f9a54a6: Status 404 returned error can't find the container with id 0f5a5bbf26dbdd550895ceaa1e63f9a949a4bceac22c309d617719b49f9a54a6
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.784536 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-dfbf10e1ea7969536b5a14314980e0f9dc35b846997217087f5e675a58c3620d WatchSource:0}: Error finding container dfbf10e1ea7969536b5a14314980e0f9dc35b846997217087f5e675a58c3620d: Status 404 returned error can't find the container with id dfbf10e1ea7969536b5a14314980e0f9dc35b846997217087f5e675a58c3620d
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.795918 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.806362 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.808796 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-10711c27350a88893fdda2944e8a3d9312711eaebfe28f810b9a4f434a2c9158 WatchSource:0}: Error finding container 10711c27350a88893fdda2944e8a3d9312711eaebfe28f810b9a4f434a2c9158: Status 404 returned error can't find the container with id 10711c27350a88893fdda2944e8a3d9312711eaebfe28f810b9a4f434a2c9158
Jan 06 08:14:58 crc kubenswrapper[4784]: I0106 08:14:58.810770 4784 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.828652 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-65f2d1775f9d24d305f57adff8ce2ff6aaae9af2e66a57ec311db8e1f137899b WatchSource:0}: Error finding container 65f2d1775f9d24d305f57adff8ce2ff6aaae9af2e66a57ec311db8e1f137899b: Status 404 returned error can't find the container with id 65f2d1775f9d24d305f57adff8ce2ff6aaae9af2e66a57ec311db8e1f137899b Jan 06 08:14:58 crc kubenswrapper[4784]: W0106 08:14:58.832638 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-505196dac87c154508e16ecd990e8c5637ab125ef74097826230ae383e75c474 WatchSource:0}: Error finding container 505196dac87c154508e16ecd990e8c5637ab125ef74097826230ae383e75c474: Status 404 returned error can't find the container with id 505196dac87c154508e16ecd990e8c5637ab125ef74097826230ae383e75c474 Jan 06 08:14:58 crc kubenswrapper[4784]: E0106 08:14:58.863238 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="800ms" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.097088 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.098420 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.098469 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.098479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.098519 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.099081 4784 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.102:6443: connect: connection refused" node="crc" Jan 06 08:14:59 crc kubenswrapper[4784]: W0106 08:14:59.127935 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.128045 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.252607 4784 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial 
tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.255718 4784 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 20:34:59.089121057 +0000 UTC Jan 06 08:14:59 crc kubenswrapper[4784]: W0106 08:14:59.301463 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.301574 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.318225 4784 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d" exitCode=0 Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.318302 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.318450 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"65f2d1775f9d24d305f57adff8ce2ff6aaae9af2e66a57ec311db8e1f137899b"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.318660 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.320313 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.320372 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"10711c27350a88893fdda2944e8a3d9312711eaebfe28f810b9a4f434a2c9158"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.320304 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.320417 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.320431 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.323252 4784 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655" exitCode=0 Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.323347 4784 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.323395 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"dfbf10e1ea7969536b5a14314980e0f9dc35b846997217087f5e675a58c3620d"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.323488 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.324611 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.324642 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.324652 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.325572 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210" exitCode=0 Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.325829 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.326143 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0f5a5bbf26dbdd550895ceaa1e63f9a949a4bceac22c309d617719b49f9a54a6"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.326231 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.327361 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.327383 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.327392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.329437 4784 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e" exitCode=0 Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.329474 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.329496 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"505196dac87c154508e16ecd990e8c5637ab125ef74097826230ae383e75c474"} Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.330154 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.331179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.331214 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.331226 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: W0106 08:14:59.481869 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.481943 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:14:59 crc kubenswrapper[4784]: W0106 08:14:59.518182 4784 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.102:6443: connect: connection refused Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.518513 4784 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:14:59 crc kubenswrapper[4784]: E0106 08:14:59.664697 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="1.6s" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.899176 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.901128 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.901171 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.901185 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:14:59 crc kubenswrapper[4784]: I0106 08:14:59.901216 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 06 08:14:59 crc kubenswrapper[4784]: 
E0106 08:14:59.901694 4784 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.102:6443: connect: connection refused" node="crc" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.184356 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 06 08:15:00 crc kubenswrapper[4784]: E0106 08:15:00.185515 4784 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.102:6443: connect: connection refused" logger="UnhandledError" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.256374 4784 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 01:11:46.814133631 +0000 UTC Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.256449 4784 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 208h56m46.557687216s for next certificate rotation Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.334379 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.334417 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.334429 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.334439 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.335989 4784 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6" exitCode=0 Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.336044 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.336149 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.336823 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.336849 4784 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.336858 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339010 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339048 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339062 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339075 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339826 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.339835 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.341528 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.341877 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.341899 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.341908 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532"} Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.341955 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342497 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342516 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 
08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342921 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342939 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.342946 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:00 crc kubenswrapper[4784]: I0106 08:15:00.671206 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.347366 4784 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907" exitCode=0 Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.347457 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907"} Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.347712 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.348705 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.348744 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.348761 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.352505 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd"} Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.352652 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.354125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.354159 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.354170 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.355193 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91"} Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.355284 4784 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.355315 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.355293 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356472 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356499 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356513 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356584 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356605 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.356616 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.502226 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.503845 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.503902 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.503928 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:01 crc kubenswrapper[4784]: I0106 08:15:01.503967 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.271360 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.363085 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3"} Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.363148 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433"} Jan 06 08:15:02 crc kubenswrapper[4784]: 
I0106 08:15:02.363174 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d"} Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.363195 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.363195 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36"} Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.364893 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.364939 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:02 crc kubenswrapper[4784]: I0106 08:15:02.364951 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.183178 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.371046 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0493343d4ae93f1cf27349de7734f1783b5898f405e99afa96220b1a84e1356e"} Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.371124 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.371287 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372421 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372463 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372477 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372806 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372854 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:03 crc kubenswrapper[4784]: I0106 08:15:03.372873 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.373999 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.374192 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.374963 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.374997 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.375009 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.375617 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.375684 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.375707 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:04 crc kubenswrapper[4784]: I0106 08:15:04.400165 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.208641 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.377364 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.379025 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.379081 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.379105 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.582840 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.583026 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.584520 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.584624 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.584649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:05 crc kubenswrapper[4784]: I0106 08:15:05.590445 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.273787 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.274040 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.275509 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:06 crc 
kubenswrapper[4784]: I0106 08:15:06.275594 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.275612 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.379609 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.379691 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.380827 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.380881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:06 crc kubenswrapper[4784]: I0106 08:15:06.380898 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:07 crc kubenswrapper[4784]: I0106 08:15:07.311807 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:07 crc kubenswrapper[4784]: I0106 08:15:07.382037 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:07 crc kubenswrapper[4784]: I0106 08:15:07.383243 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:07 crc kubenswrapper[4784]: I0106 08:15:07.383302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:07 crc kubenswrapper[4784]: I0106 08:15:07.383324 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:08 crc kubenswrapper[4784]: I0106 08:15:08.385504 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:08 crc kubenswrapper[4784]: I0106 08:15:08.386941 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:08 crc kubenswrapper[4784]: I0106 08:15:08.387005 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:08 crc kubenswrapper[4784]: I0106 08:15:08.387023 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:08 crc kubenswrapper[4784]: E0106 08:15:08.403114 4784 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 06 08:15:08 crc kubenswrapper[4784]: I0106 08:15:08.928016 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:09 crc kubenswrapper[4784]: I0106 08:15:09.394540 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:09 crc kubenswrapper[4784]: I0106 08:15:09.396397 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:09 crc kubenswrapper[4784]: I0106 
08:15:09.396464 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:09 crc kubenswrapper[4784]: I0106 08:15:09.396492 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:09 crc kubenswrapper[4784]: I0106 08:15:09.400249 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.252452 4784 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.312694 4784 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.312822 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.409065 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.410275 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.410340 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.410358 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.927588 4784 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.927658 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 06 08:15:10 crc kubenswrapper[4784]: I0106 08:15:10.933676 4784 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 06 08:15:10 crc kubenswrapper[4784]: 
I0106 08:15:10.933747 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 06 08:15:11 crc kubenswrapper[4784]: I0106 08:15:11.999256 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 06 08:15:11 crc kubenswrapper[4784]: I0106 08:15:11.999413 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.006321 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.006392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.006406 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.042836 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.414625 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.416090 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.416153 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.416172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:12 crc kubenswrapper[4784]: I0106 08:15:12.436701 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 06 08:15:13 crc kubenswrapper[4784]: I0106 08:15:13.416964 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:13 crc kubenswrapper[4784]: I0106 08:15:13.418676 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:13 crc kubenswrapper[4784]: I0106 08:15:13.418780 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:13 crc kubenswrapper[4784]: I0106 08:15:13.418803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.218068 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.218292 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.219762 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.219842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:15 crc kubenswrapper[4784]: 
I0106 08:15:15.219873 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.225755 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.422108 4784 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.422182 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.423418 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.423486 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.423514 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:15 crc kubenswrapper[4784]: E0106 08:15:15.910475 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.915182 4784 trace.go:236] Trace[1276038264]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Jan-2026 08:15:01.493) (total time: 14421ms):
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[1276038264]: ---"Objects listed" error: 14421ms (08:15:15.915)
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[1276038264]: [14.421680944s] [14.421680944s] END
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.915237 4784 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.916233 4784 trace.go:236] Trace[121150075]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Jan-2026 08:15:01.888) (total time: 14027ms):
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[121150075]: ---"Objects listed" error: 14027ms (08:15:15.916)
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[121150075]: [14.027716689s] [14.027716689s] END
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.916277 4784 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 06 08:15:15 crc kubenswrapper[4784]: E0106 08:15:15.916729 4784 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.917699 4784 trace.go:236] Trace[910649990]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Jan-2026 08:15:01.440) (total time: 14476ms):
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[910649990]: ---"Objects listed" error: 14476ms (08:15:15.917)
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[910649990]: [14.476792411s] [14.476792411s] END
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.917741 4784 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.918821 4784 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.919427 4784 trace.go:236] Trace[704885182]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Jan-2026 08:15:01.980) (total time: 13938ms):
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[704885182]: ---"Objects listed" error: 13938ms (08:15:15.919)
Jan 06 08:15:15 crc kubenswrapper[4784]: Trace[704885182]: [13.938873249s] [13.938873249s] END
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.919470 4784 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 06 08:15:15 crc kubenswrapper[4784]: I0106 08:15:15.939859 4784 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.247349 4784 apiserver.go:52] "Watching apiserver"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.250115 4784 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.250381 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.250777 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.250861 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.250872 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.251066 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.251139 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.251346 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.251370 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.251861 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.252132 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.253381 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.255012 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.255080 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.255123 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.256511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.256557 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.256651 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.256723 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.256860 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.257583 4784 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.294509 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.306750 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.320377 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321633 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321685 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321713 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321735 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321758 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321779 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321801 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321823 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321847 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321874 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321895 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321916 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321939 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321961 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321962 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321986 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322013 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322039 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322062 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322084 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322106 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322128 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322152 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322175 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322197 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322223 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322248 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322271 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322291 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322316 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322339 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322364 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322415 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322439 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322461 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322484 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322512 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322566 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322591 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322614 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322638 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322662 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322688 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322716 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322740 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322765 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322803 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322827 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322851 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322875 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322901 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322926 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322951 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322975 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322998 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323021 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323044 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323071 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323093 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323118 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323145 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323173 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323197 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323221 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323246 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323272 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323296 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323319 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323343 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323366 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323395 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323421 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323446 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323474 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323494 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323516 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323556 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323581 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323712 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323742 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323765 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323788 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323808 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323833 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323855 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323879 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323901 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323923 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323946 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323972 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323999 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324023 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324049 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324072 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324093 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324116 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324141 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324167 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324190 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324213 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.321963 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322115 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322489 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322521 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322630 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322749 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322859 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322878 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.322984 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323133 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323307 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323365 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323367 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323595 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323705 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323837 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323904 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323977 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.323982 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324230 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324503 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324561 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324600 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329462 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324809 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324821 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324881 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.324973 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325033 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325163 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325229 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325255 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325379 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325370 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325447 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325491 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325645 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325712 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325737 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325777 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325781 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.325851 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329743 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.326108 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:16.826086139 +0000 UTC m=+18.872258976 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.330033 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.330068 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.330041 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.326942 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.326833 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327147 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327251 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327262 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327632 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327730 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327846 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327887 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328139 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328275 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328522 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328562 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328678 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328672 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328755 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328812 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.328819 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329288 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329339 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329372 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329389 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329825 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.329834 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.330073 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331023 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.327072 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331057 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331089 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331119 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331142 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331163 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331183 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331202 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331221 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331242 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331261 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 06 08:15:16 crc 
kubenswrapper[4784]: I0106 08:15:16.331282 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331303 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331323 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331343 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331363 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331386 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331410 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331434 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331456 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331479 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331505 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331531 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331576 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331596 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331622 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331642 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331662 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331682 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331704 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331724 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331747 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331769 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331862 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331886 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331905 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331928 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331955 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331974 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.331996 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332016 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: 
\"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332038 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332158 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332248 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332344 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332484 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332519 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332728 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332790 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332800 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332813 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332838 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332858 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332881 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332903 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332909 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.332924 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333110 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333144 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333160 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333185 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333191 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333222 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333247 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333273 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333296 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333329 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333354 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333375 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333395 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333466 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: 
\"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333489 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333510 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333530 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333570 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333591 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333612 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333634 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333656 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333678 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333699 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334954 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334993 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335016 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335042 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335092 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335115 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335138 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335162 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335183 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335205 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335225 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335247 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335321 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335345 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335367 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335388 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335409 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335432 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335453 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335476 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335500 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336074 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339286 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339351 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339387 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339414 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339437 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339483 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339507 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339529 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339569 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339600 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339625 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339646 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339668 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339691 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339713 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339817 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339832 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339846 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339858 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339870 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339884 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339897 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339908 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339920 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339932 4784 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339943 4784 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339955 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339966 4784 reconciler_common.go:293] "Volume detached for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339978 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339990 4784 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340003 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340015 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340025 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340036 4784 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340048 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340059 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340071 4784 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340082 4784 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340093 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340104 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340116 4784 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340127 4784 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340139 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340149 4784 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340160 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340177 4784 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340188 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340199 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340211 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340223 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340235 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340246 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340257 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340269 4784 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340280 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340291 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340307 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340318 4784 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340331 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340343 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340353 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340365 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340376 4784 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340402 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340413 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340425 4784 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340470 4784 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" 
(UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342519 4784 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342558 4784 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342575 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342599 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342612 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342623 4784 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342633 4784 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342648 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342663 4784 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342674 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342689 4784 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342710 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342720 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342730 4784 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342739 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342752 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342763 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342772 4784 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342788 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342798 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342808 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342818 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342830 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342839 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342874 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343290 4784 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343311 4784 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343321 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343331 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343346 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.344139 4784 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333191 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333285 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333292 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333455 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333489 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333586 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333745 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.333900 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334136 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334187 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334198 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.334132 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335389 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335452 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.335759 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336084 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.344832 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336436 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336501 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336925 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336232 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.336925 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.337425 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.337673 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.337876 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.338123 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.338373 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.338920 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.339226 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.340434 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342093 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342291 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342485 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342512 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.342678 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.343304 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.344076 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.344107 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.344755 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.345035 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.345204 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.345276 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:16.845256553 +0000 UTC m=+18.891429390 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.345524 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.345593 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:16.845581743 +0000 UTC m=+18.891754580 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.345957 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.345986 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.346227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.346536 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.346596 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347077 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347173 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347248 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347401 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347773 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347786 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.347800 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348724 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348752 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348915 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348671 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.349647 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.349757 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.350227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348263 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348411 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.351032 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.353405 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.353750 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.353865 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.354282 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.354340 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.354624 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.354719 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.348155 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.354915 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.355029 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.355079 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.355103 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.355342 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.355430 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.356731 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.357050 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.358949 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.358977 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.359017 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.359034 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.359110 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:16.859067161 +0000 UTC m=+18.905240018 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.359810 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361085 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361136 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361155 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361754 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361771 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.361950 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.362262 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.363158 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.367211 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.368867 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.369217 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.370779 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.370804 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.370820 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.370873 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:16.870854837 +0000 UTC m=+18.917027904 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.371442 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.371858 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.371925 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.372554 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.372949 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.373830 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.374723 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.375277 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.375487 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.375675 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.376152 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.376531 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.376843 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.376929 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377106 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377178 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377138 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377325 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377934 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.377980 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.378139 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.378245 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.378301 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.378561 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.378333 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.379093 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.379442 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.381674 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.384111 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.389029 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.390398 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.397085 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.443910 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444086 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444744 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444857 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444916 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444943 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444954 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444964 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444973 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444981 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.444990 4784 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc 
kubenswrapper[4784]: I0106 08:15:16.445000 4784 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445009 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445017 4784 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445026 4784 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445034 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445042 4784 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445052 4784 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445061 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445069 4784 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445099 4784 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445108 4784 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445117 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445127 4784 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc 
kubenswrapper[4784]: I0106 08:15:16.445136 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445144 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445154 4784 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445163 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445171 4784 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445180 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445190 4784 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445202 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445213 4784 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445223 4784 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445233 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445243 4784 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445253 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: 
I0106 08:15:16.445262 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445272 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445282 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445291 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445301 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445311 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445322 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445330 4784 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445339 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445348 4784 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445356 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445365 4784 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445374 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445384 4784 
reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445391 4784 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445399 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445410 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445418 4784 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445426 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445434 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445445 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445456 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445469 4784 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445478 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445489 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445498 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445507 4784 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445517 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445525 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445536 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445564 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445573 4784 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445584 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445594 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445604 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445613 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445621 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445630 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445637 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445658 4784 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445669 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445681 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445692 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445700 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445708 4784 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445716 4784 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445725 4784 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445732 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445740 4784 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445752 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445760 4784 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445769 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445779 4784 reconciler_common.go:293] "Volume detached for volume 
\"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445789 4784 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445799 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445807 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445815 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445823 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445830 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445839 4784 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445847 4784 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445855 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445862 4784 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445870 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445878 4784 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445887 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445895 4784 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445903 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445911 4784 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445919 4784 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445927 4784 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445935 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445944 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445952 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445962 4784 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445970 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445978 4784 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445986 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.445995 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" 
(UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.453735 4784 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52388->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.453811 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52388->192.168.126.11:17697: read: connection reset by peer" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.454134 4784 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.454164 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.568435 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.576726 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 06 08:15:16 crc kubenswrapper[4784]: W0106 08:15:16.580523 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-2594c93aa37b8d271983d8fab06f9fc72fe843192a978b857de29f9144b6f876 WatchSource:0}: Error finding container 2594c93aa37b8d271983d8fab06f9fc72fe843192a978b857de29f9144b6f876: Status 404 returned error can't find the container with id 2594c93aa37b8d271983d8fab06f9fc72fe843192a978b857de29f9144b6f876 Jan 06 08:15:16 crc kubenswrapper[4784]: W0106 08:15:16.587752 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-bb0cb229f82046254f100bb6f352d5c8f1eac8a95de90b97fd6e1f8878b5913e WatchSource:0}: Error finding container bb0cb229f82046254f100bb6f352d5c8f1eac8a95de90b97fd6e1f8878b5913e: Status 404 returned error can't find the container with id bb0cb229f82046254f100bb6f352d5c8f1eac8a95de90b97fd6e1f8878b5913e Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.595271 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 06 08:15:16 crc kubenswrapper[4784]: W0106 08:15:16.610614 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-655139becfbb583d6bc4dc46155d97e1e74c1abdc5f6d07acf8c69cff4744da0 WatchSource:0}: Error finding container 655139becfbb583d6bc4dc46155d97e1e74c1abdc5f6d07acf8c69cff4744da0: Status 404 returned error can't find the container with id 655139becfbb583d6bc4dc46155d97e1e74c1abdc5f6d07acf8c69cff4744da0 Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.848519 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.848636 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.848683 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:17.848651951 +0000 UTC m=+19.894824788 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.848714 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.848756 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.848769 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:17.848753605 +0000 UTC m=+19.894926452 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.848910 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.848953 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:17.848943731 +0000 UTC m=+19.895116628 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.949634 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:16 crc kubenswrapper[4784]: I0106 08:15:16.949705 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949837 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949860 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949884 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949901 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949930 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-06 08:15:17.949916562 +0000 UTC m=+19.996089399 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949942 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.949960 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:16 crc kubenswrapper[4784]: E0106 08:15:16.950030 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:17.950007424 +0000 UTC m=+19.996180281 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.318200 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.324694 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.333283 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.339824 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.355152 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.370493 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.386362 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.405953 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.435223 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.435314 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.435340 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bb0cb229f82046254f100bb6f352d5c8f1eac8a95de90b97fd6e1f8878b5913e"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.437107 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.437964 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.437993 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"2594c93aa37b8d271983d8fab06f9fc72fe843192a978b857de29f9144b6f876"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.440197 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.442622 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd" exitCode=255 Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.442690 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.444246 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"655139becfbb583d6bc4dc46155d97e1e74c1abdc5f6d07acf8c69cff4744da0"} Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.457439 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.457759 4784 scope.go:117] "RemoveContainer" containerID="44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.479600 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.485376 4784 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.498317 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cd
d47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.516089 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\
\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.531940 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.547381 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.565186 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.578157 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.594793 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.610779 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"ce
rt-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.625754 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.639721 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06
T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.651495 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.662987 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.675475 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.691123 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.856477 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.856582 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.856607 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.856739 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.856793 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:19.85677714 +0000 UTC m=+21.902949987 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.857128 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:19.857118981 +0000 UTC m=+21.903291828 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.857192 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.857221 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:19.857212314 +0000 UTC m=+21.903385161 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.957693 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:17 crc kubenswrapper[4784]: I0106 08:15:17.957763 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957848 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957868 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957870 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957878 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957890 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957902 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957924 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:19.957911166 +0000 UTC m=+22.004084003 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:17 crc kubenswrapper[4784]: E0106 08:15:17.957942 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:19.957930657 +0000 UTC m=+22.004103494 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.311714 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.312190 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:18 crc kubenswrapper[4784]: E0106 08:15:18.312426 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.312654 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:18 crc kubenswrapper[4784]: E0106 08:15:18.313019 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:18 crc kubenswrapper[4784]: E0106 08:15:18.313149 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.319164 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.320263 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.322818 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.323613 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.324276 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.324868 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.325613 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.326343 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.327122 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.327805 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.328373 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.329186 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.329353 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.329771 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.330355 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.330986 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.331648 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.332299 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.333739 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.334972 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.336185 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.337921 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" 
path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.339073 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.340693 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.341636 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.342340 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.343017 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.343767 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.344272 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.344922 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.345432 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.345969 4784 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.346072 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.347361 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.347864 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.348307 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" 
path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.349504 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.350213 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.350317 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.354236 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.355147 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.356602 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.357153 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.358393 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.359494 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.360519 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.361128 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.362054 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.362522 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.363834 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.364282 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.365129 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.365633 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.366563 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.367107 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.367604 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.368441 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.387065 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource
-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.408186 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.425729 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.438204 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.448585 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.450410 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff"} Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.458411 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.471656 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.488527 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.503861 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.524826 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.538848 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.561653 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.584382 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:18 crc kubenswrapper[4784]: I0106 08:15:18.600844 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.118077 4784 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.121122 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.121173 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.121192 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.121249 4784 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.132058 4784 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.132407 4784 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.133672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.133723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.133776 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.133801 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.133819 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.161997 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.168108 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.168143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.168153 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.168165 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.168176 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.185338 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.189733 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.189777 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.189786 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.189801 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.189813 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.204581 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.208980 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.209035 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.209055 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.209079 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.209098 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.236284 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.240574 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.240629 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.240648 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.240671 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.240688 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.255539 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.255857 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.258319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.258403 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.258419 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.258445 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.258518 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.361742 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.361798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.361812 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.361837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.361856 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.454969 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379"} Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.455376 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.464114 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.464161 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.464171 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.464187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.464199 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.473310 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.490103 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.506525 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.529485 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.547385 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.566856 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.567286 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.567316 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.567328 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.567346 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.567356 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.587625 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.605878 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:19Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.670244 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.670308 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.670327 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.670353 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.670370 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.773179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.773232 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.773247 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.773269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.773288 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.871913 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.872006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.872048 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.872077 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:23.872045337 +0000 UTC m=+25.918218214 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.872172 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.872177 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.872229 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:23.872217843 +0000 UTC m=+25.918390690 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.872263 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:23.872240683 +0000 UTC m=+25.918413560 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.876059 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.876124 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.876143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.876168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.876188 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.973302 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.973395 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973599 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973599 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973627 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973644 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973651 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973663 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973730 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:23.97370664 +0000 UTC m=+26.019879507 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 06 08:15:19 crc kubenswrapper[4784]: E0106 08:15:19.973758 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:23.973745741 +0000 UTC m=+26.019918618 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.979536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.979609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.979627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.979650 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:19 crc kubenswrapper[4784]: I0106 08:15:19.979667 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:19Z","lastTransitionTime":"2026-01-06T08:15:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.082018 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.082063 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.082096 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.082115 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.082127 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.184898 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.184968 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.184990 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.185019 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.185044 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.287877 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.287937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.287954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.288047 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.288065 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.311302 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.311378 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.311401 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:20 crc kubenswrapper[4784]: E0106 08:15:20.311510 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:20 crc kubenswrapper[4784]: E0106 08:15:20.312081 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:20 crc kubenswrapper[4784]: E0106 08:15:20.312243 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.391061 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.391143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.391165 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.391191 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.391201 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.494084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.494131 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.494142 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.494158 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.494169 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.597515 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.597571 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.597582 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.597597 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.597608 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.700820 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.700900 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.700926 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.700950 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.700968 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.803881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.803940 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.803956 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.803975 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.803989 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.906953 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.907016 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.907029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.907048 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:20 crc kubenswrapper[4784]: I0106 08:15:20.907060 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:20Z","lastTransitionTime":"2026-01-06T08:15:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.010289 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.010363 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.010386 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.010416 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.010439 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.112885 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.112949 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.112968 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.112992 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.113025 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.215931 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.215974 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.215982 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.215994 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.216005 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.318809 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.318855 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.318869 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.318888 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.318901 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.380671 4784 csr.go:261] certificate signing request csr-d6ppf is approved, waiting to be issued
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.391934 4784 csr.go:257] certificate signing request csr-d6ppf is issued
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.402360 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-62dc6"]
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.402625 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-62dc6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.404247 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.404765 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.405630 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.420833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.420877 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.420888 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.420907 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.420919 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.448230 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.511767 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.523024 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.523061 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.523069 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.523086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.523097 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.535800 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.558880 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.573879 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.586152 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.589439 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/101ca23a-112e-4976-bfec-16a98fcfbd0d-hosts-file\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.589490 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h2db\" (UniqueName: \"kubernetes.io/projected/101ca23a-112e-4976-bfec-16a98fcfbd0d-kube-api-access-6h2db\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.597905 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.608427 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.622425 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:21Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.625136 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.625170 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.625178 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.625189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.625197 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.690075 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h2db\" (UniqueName: \"kubernetes.io/projected/101ca23a-112e-4976-bfec-16a98fcfbd0d-kube-api-access-6h2db\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.690126 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/101ca23a-112e-4976-bfec-16a98fcfbd0d-hosts-file\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.690223 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/101ca23a-112e-4976-bfec-16a98fcfbd0d-hosts-file\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.724701 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h2db\" (UniqueName: \"kubernetes.io/projected/101ca23a-112e-4976-bfec-16a98fcfbd0d-kube-api-access-6h2db\") pod \"node-resolver-62dc6\" (UID: \"101ca23a-112e-4976-bfec-16a98fcfbd0d\") " pod="openshift-dns/node-resolver-62dc6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.727088 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.727186 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.727241 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.727301 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.727354 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.830823 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.830875 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.830887 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.830906 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.830918 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.933234 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.933296 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.933311 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.933334 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.933349 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:21Z","lastTransitionTime":"2026-01-06T08:15:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.979076 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-68nth"]
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.979465 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.981536 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-24ksn"]
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.981724 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.981918 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.981996 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.982199 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-l2xdd"]
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.982423 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.982886 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.983861 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.984107 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.985790 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986205 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986234 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986243 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986259 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986291 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.986341 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.992913 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.992951 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmp8f\" (UniqueName: \"kubernetes.io/projected/35dd8181-ce20-4a99-a883-84811f75e0a6-kube-api-access-qmp8f\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.992987 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-system-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993009 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993029 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-etc-kubernetes\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993060 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-conf-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993089 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-os-release\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993109 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kgcf\" (UniqueName: \"kubernetes.io/projected/85f24cc3-ceca-49ce-b774-32e773e72c1a-kube-api-access-9kgcf\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993129 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-binary-copy\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993150 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-system-cni-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993170 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/94b52312-7b54-4df2-ab82-0eb7b01334f7-proxy-tls\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993190 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-k8s-cni-cncf-io\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993262 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/94b52312-7b54-4df2-ab82-0eb7b01334f7-mcd-auth-proxy-config\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993354 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-cni-binary-copy\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993393 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-bin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993420 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-kubelet\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993448 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-cnibin\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993505 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993572 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-os-release\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993605 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-netns\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993659 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-socket-dir-parent\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993690 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-daemon-config\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993720 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/94b52312-7b54-4df2-ab82-0eb7b01334f7-rootfs\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993751 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-cnibin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993769 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-hostroot\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993784 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-multus-certs\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993800 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-multus\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd"
Jan 06 08:15:21 crc kubenswrapper[4784]: I0106 08:15:21.993814 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85vh6\" (UniqueName: 
\"kubernetes.io/projected/94b52312-7b54-4df2-ab82-0eb7b01334f7-kube-api-access-85vh6\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.012009 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.015090 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-62dc6" Jan 06 08:15:22 crc kubenswrapper[4784]: W0106 08:15:22.027405 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod101ca23a_112e_4976_bfec_16a98fcfbd0d.slice/crio-0dcc4d9e6a0e22838e721e6f4dea7d057670f2de0b1b31f6810298442cd223dc WatchSource:0}: Error finding container 0dcc4d9e6a0e22838e721e6f4dea7d057670f2de0b1b31f6810298442cd223dc: Status 404 returned error can't find the container with id 0dcc4d9e6a0e22838e721e6f4dea7d057670f2de0b1b31f6810298442cd223dc Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.039016 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.039079 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.039094 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.039114 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.039130 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.042236 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.058090 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.072558 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094588 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-netns\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094646 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-socket-dir-parent\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094668 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-daemon-config\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094693 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-cnibin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094714 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-hostroot\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094733 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-multus-certs\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094762 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/94b52312-7b54-4df2-ab82-0eb7b01334f7-rootfs\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094786 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-multus\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094806 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85vh6\" (UniqueName: \"kubernetes.io/projected/94b52312-7b54-4df2-ab82-0eb7b01334f7-kube-api-access-85vh6\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094827 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094850 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmp8f\" (UniqueName: \"kubernetes.io/projected/35dd8181-ce20-4a99-a883-84811f75e0a6-kube-api-access-qmp8f\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094878 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-system-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094896 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094914 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-etc-kubernetes\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094944 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-conf-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094973 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-os-release\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.094992 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kgcf\" (UniqueName: \"kubernetes.io/projected/85f24cc3-ceca-49ce-b774-32e773e72c1a-kube-api-access-9kgcf\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095033 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-binary-copy\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095054 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-system-cni-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095072 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/94b52312-7b54-4df2-ab82-0eb7b01334f7-proxy-tls\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095090 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-k8s-cni-cncf-io\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095136 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/94b52312-7b54-4df2-ab82-0eb7b01334f7-mcd-auth-proxy-config\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095157 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-cni-binary-copy\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095179 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-bin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095198 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-kubelet\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095217 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-cnibin\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095237 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095258 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-os-release\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095359 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-os-release\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095406 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-netns\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095447 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-socket-dir-parent\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095822 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-conf-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095881 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-etc-kubernetes\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095882 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-multus-certs\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095935 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-cnibin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095959 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-hostroot\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095968 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-run-k8s-cni-cncf-io\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095994 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096032 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-system-cni-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.095994 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-system-cni-dir\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096001 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-os-release\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096057 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/94b52312-7b54-4df2-ab82-0eb7b01334f7-rootfs\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096076 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-multus\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096100 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-cnibin\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096109 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-cni-bin\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096122 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/85f24cc3-ceca-49ce-b774-32e773e72c1a-host-var-lib-kubelet\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096571 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096641 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-cni-binary-copy\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096658 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/35dd8181-ce20-4a99-a883-84811f75e0a6-cni-binary-copy\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096673 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/35dd8181-ce20-4a99-a883-84811f75e0a6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096906 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/94b52312-7b54-4df2-ab82-0eb7b01334f7-mcd-auth-proxy-config\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.096937 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/85f24cc3-ceca-49ce-b774-32e773e72c1a-multus-daemon-config\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.098822 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/94b52312-7b54-4df2-ab82-0eb7b01334f7-proxy-tls\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.101302 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.118262 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85vh6\" (UniqueName: \"kubernetes.io/projected/94b52312-7b54-4df2-ab82-0eb7b01334f7-kube-api-access-85vh6\") pod \"machine-config-daemon-68nth\" (UID: \"94b52312-7b54-4df2-ab82-0eb7b01334f7\") " pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.122083 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kgcf\" (UniqueName: \"kubernetes.io/projected/85f24cc3-ceca-49ce-b774-32e773e72c1a-kube-api-access-9kgcf\") pod \"multus-l2xdd\" (UID: \"85f24cc3-ceca-49ce-b774-32e773e72c1a\") " pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.126310 4784 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.130141 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmp8f\" (UniqueName: \"kubernetes.io/projected/35dd8181-ce20-4a99-a883-84811f75e0a6-kube-api-access-qmp8f\") pod \"multus-additional-cni-plugins-24ksn\" (UID: \"35dd8181-ce20-4a99-a883-84811f75e0a6\") " pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.146861 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.156766 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.156819 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.156864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.156882 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.156893 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.170648 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.186697 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.203261 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.219421 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.231015 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.241997 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.251443 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.258677 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.258712 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.258724 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.258740 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.258751 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.264822 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.275970 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.287271 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.295284 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.303361 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-l2xdd" Jan 06 08:15:22 crc kubenswrapper[4784]: W0106 08:15:22.304118 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94b52312_7b54_4df2_ab82_0eb7b01334f7.slice/crio-1b1401245e30d35a711a2d0085886eda7253e6469729b5f106f21a0d4af04bba WatchSource:0}: Error finding container 1b1401245e30d35a711a2d0085886eda7253e6469729b5f106f21a0d4af04bba: Status 404 returned error can't find the container with id 1b1401245e30d35a711a2d0085886eda7253e6469729b5f106f21a0d4af04bba Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.304624 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.311816 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-24ksn" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.311930 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.311985 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.311938 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:22 crc kubenswrapper[4784]: E0106 08:15:22.312075 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:22 crc kubenswrapper[4784]: E0106 08:15:22.312147 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:22 crc kubenswrapper[4784]: E0106 08:15:22.312266 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:22 crc kubenswrapper[4784]: W0106 08:15:22.319850 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85f24cc3_ceca_49ce_b774_32e773e72c1a.slice/crio-adca192ad1a1942041f6af7c622d54dfb9a16d3e233947d017a5f45dc57c65f2 WatchSource:0}: Error finding container adca192ad1a1942041f6af7c622d54dfb9a16d3e233947d017a5f45dc57c65f2: Status 404 returned error can't find the container with id adca192ad1a1942041f6af7c622d54dfb9a16d3e233947d017a5f45dc57c65f2 Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.322965 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-cont
roller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: W0106 08:15:22.337040 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35dd8181_ce20_4a99_a883_84811f75e0a6.slice/crio-370117d2c181d5b84b99affd19601147d2ea0124099903f4baa2686cc405b8f2 WatchSource:0}: Error finding container 370117d2c181d5b84b99affd19601147d2ea0124099903f4baa2686cc405b8f2: Status 404 returned error can't find the container with id 370117d2c181d5b84b99affd19601147d2ea0124099903f4baa2686cc405b8f2 Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.361890 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.361922 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.361931 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.361944 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.361954 4784 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.370036 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.394603 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-06 08:10:21 +0000 UTC, rotation deadline is 2026-10-29 14:32:49.920064 +0000 UTC Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.394666 4784 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7110h17m27.525399805s for next certificate rotation Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.398843 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.423868 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.464034 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.464063 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.464071 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.464086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.464096 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.465408 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerStarted","Data":"adca192ad1a1942041f6af7c622d54dfb9a16d3e233947d017a5f45dc57c65f2"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.467483 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"1b1401245e30d35a711a2d0085886eda7253e6469729b5f106f21a0d4af04bba"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.469305 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerStarted","Data":"370117d2c181d5b84b99affd19601147d2ea0124099903f4baa2686cc405b8f2"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.471402 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-62dc6" event={"ID":"101ca23a-112e-4976-bfec-16a98fcfbd0d","Type":"ContainerStarted","Data":"7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.471441 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-62dc6" event={"ID":"101ca23a-112e-4976-bfec-16a98fcfbd0d","Type":"ContainerStarted","Data":"0dcc4d9e6a0e22838e721e6f4dea7d057670f2de0b1b31f6810298442cd223dc"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.483340 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.494221 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.505649 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.522515 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.533351 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.548338 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.562656 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.568972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.569015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.569023 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.569038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.569051 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.579518 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b33
5e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.593001 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.606778 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.621808 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.636374 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.671525 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.671596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.671608 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.671626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.671640 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.773709 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.773753 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.773764 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.773780 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.773795 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.808585 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-blw4q"] Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.809364 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.811123 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.811879 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.812057 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.812185 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.812289 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.812399 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.812609 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.821988 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.834160 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.851995 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.863224 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.872803 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.876488 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.876539 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.876566 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.876583 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.876615 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.884731 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 
2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.899421 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902604 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902638 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902655 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902672 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902687 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902701 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902718 4784 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902733 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvbtl\" (UniqueName: \"kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902861 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.902932 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903002 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903056 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903094 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903173 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903210 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903240 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903269 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903303 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903372 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.903404 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.913598 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.927320 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.939092 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.952819 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.967613 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.979404 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.979455 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.979467 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.979485 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.979501 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:22Z","lastTransitionTime":"2026-01-06T08:15:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:22 crc kubenswrapper[4784]: I0106 08:15:22.990876 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:22Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004095 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 
08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004138 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvbtl\" (UniqueName: \"kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004163 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004200 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004226 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004247 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004267 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004298 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004319 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004358 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004358 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004382 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004432 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004440 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004467 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004501 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004506 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004489 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004563 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004565 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004517 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004787 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004611 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004843 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004861 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004923 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004972 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.004997 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns\") pod \"ovnkube-node-blw4q\" (UID: 
\"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005084 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005327 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005360 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005400 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005439 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005480 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005514 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005653 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.005713 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 
08:15:23.012251 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.021671 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvbtl\" (UniqueName: \"kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl\") pod \"ovnkube-node-blw4q\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") " pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.081506 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.081566 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.081579 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.081595 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.081606 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.132993 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:23 crc kubenswrapper[4784]: W0106 08:15:23.147722 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod700c7389_9fff_4331_9d37_6af2ff592ac5.slice/crio-29ec8b9e9521c18c57eb011b0ee97b7d548947a004cb0b44587c8211c1cf8a18 WatchSource:0}: Error finding container 29ec8b9e9521c18c57eb011b0ee97b7d548947a004cb0b44587c8211c1cf8a18: Status 404 returned error can't find the container with id 29ec8b9e9521c18c57eb011b0ee97b7d548947a004cb0b44587c8211c1cf8a18 Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.184662 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.184703 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.184717 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.184733 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.184745 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.287394 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.287437 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.287448 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.287464 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.287474 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.389966 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.389999 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.390007 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.390020 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.390029 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.475194 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerStarted","Data":"1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.476814 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.476857 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.478139 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22" exitCode=0 Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.478376 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.479150 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"29ec8b9e9521c18c57eb011b0ee97b7d548947a004cb0b44587c8211c1cf8a18"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.492233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.492294 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.492308 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.492325 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.492337 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.496143 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.511845 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.527628 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.541852 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.554031 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.565720 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.574298 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.583793 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.595955 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.597086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.597120 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.597129 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.597144 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.597153 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.611647 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.622975 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.635589 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.651769 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.668883 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.679504 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.690593 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.699319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.699358 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.699368 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.699382 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.699393 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.701763 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.710898 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.720678 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.735336 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.745928 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/o
cp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.758233 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.775214 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.787080 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.799277 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.801235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.801271 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.801279 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.801295 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.801303 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.812520 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:23Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.903226 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.903269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.903281 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.903301 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.903312 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:23Z","lastTransitionTime":"2026-01-06T08:15:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.914938 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.915053 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:23 crc kubenswrapper[4784]: I0106 08:15:23.915085 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:23 crc kubenswrapper[4784]: E0106 08:15:23.915129 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:31.915101169 +0000 UTC m=+33.961274016 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:23 crc kubenswrapper[4784]: E0106 08:15:23.915205 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:23 crc kubenswrapper[4784]: E0106 08:15:23.915273 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:31.915254954 +0000 UTC m=+33.961427881 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:23 crc kubenswrapper[4784]: E0106 08:15:23.915285 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:23 crc kubenswrapper[4784]: E0106 08:15:23.915416 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:31.915381278 +0000 UTC m=+33.961554155 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.005833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.005878 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.005889 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.005906 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.005918 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.015825 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.015896 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016041 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016075 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016088 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016089 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016119 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016129 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016154 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:32.016136152 +0000 UTC m=+34.062309079 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.016179 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:32.016163463 +0000 UTC m=+34.062336300 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.114747 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.114798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.114814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.114834 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.114850 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.218389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.218864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.219023 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.219233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.219427 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.268033 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-qnthf"] Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.268412 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.270447 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.271374 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.271697 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.272509 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.296903 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.311619 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.311633 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.312050 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.312216 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.312397 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:24 crc kubenswrapper[4784]: E0106 08:15:24.312644 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.317708 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1769680a-b6b7-4ffd-98db-76a67c46caf2-serviceca\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.317775 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1769680a-b6b7-4ffd-98db-76a67c46caf2-host\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.317814 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5p9j\" (UniqueName: \"kubernetes.io/projected/1769680a-b6b7-4ffd-98db-76a67c46caf2-kube-api-access-x5p9j\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.319409 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.322224 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.322258 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.322269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.322286 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.322298 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.341266 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.371661 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.388310 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.406840 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.418843 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1769680a-b6b7-4ffd-98db-76a67c46caf2-host\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.418898 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5p9j\" (UniqueName: \"kubernetes.io/projected/1769680a-b6b7-4ffd-98db-76a67c46caf2-kube-api-access-x5p9j\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.418948 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1769680a-b6b7-4ffd-98db-76a67c46caf2-serviceca\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.418988 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1769680a-b6b7-4ffd-98db-76a67c46caf2-host\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.420273 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1769680a-b6b7-4ffd-98db-76a67c46caf2-serviceca\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.423776 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.424925 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.425106 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.425229 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.425344 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.425472 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.435472 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.438649 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5p9j\" (UniqueName: \"kubernetes.io/projected/1769680a-b6b7-4ffd-98db-76a67c46caf2-kube-api-access-x5p9j\") pod \"node-ca-qnthf\" (UID: \"1769680a-b6b7-4ffd-98db-76a67c46caf2\") " pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.445521 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.458248 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.471732 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/o
cp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.482310 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.483913 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede" exitCode=0 Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.484013 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.485667 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" exitCode=0 Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.485853 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.500977 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.512703 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.525875 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.528589 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.528650 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.528671 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.528696 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.528713 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.540651 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.555166 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.566222 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.577529 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.585005 4784 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-image-registry/node-ca-qnthf" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.592759 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mou
ntPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{}
,\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: W0106 08:15:24.608394 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1769680a_b6b7_4ffd_98db_76a67c46caf2.slice/crio-a45c36cbbb07a1d21b11c79b2a6d24d60aef4d72333dd878fe29476bf8c4ce99 WatchSource:0}: Error finding container a45c36cbbb07a1d21b11c79b2a6d24d60aef4d72333dd878fe29476bf8c4ce99: Status 404 returned error can't find the container with id a45c36cbbb07a1d21b11c79b2a6d24d60aef4d72333dd878fe29476bf8c4ce99 Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.614383 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.626436 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.631426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.631463 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.631475 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.631491 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.631504 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.642040 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.665335 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z 
is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.681147 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.709678 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.737722 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.752954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.752992 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.753003 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.753019 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.753030 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.771386 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:24Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.854233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.854267 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.854276 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.854291 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.854300 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.957083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.957117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.957127 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.957141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:24 crc kubenswrapper[4784]: I0106 08:15:24.957152 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:24Z","lastTransitionTime":"2026-01-06T08:15:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.060125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.060158 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.060167 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.060182 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.060192 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.162489 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.162521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.162529 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.162553 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.162564 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.265141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.265183 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.265196 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.265211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.265223 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.368615 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.368665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.368677 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.368700 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.368713 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.471444 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.471512 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.471536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.471602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.471631 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.492219 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4" exitCode=0 Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.492332 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499165 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499247 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499269 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499288 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499309 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.499326 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.503741 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-qnthf" event={"ID":"1769680a-b6b7-4ffd-98db-76a67c46caf2","Type":"ContainerStarted","Data":"ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.503807 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-qnthf" event={"ID":"1769680a-b6b7-4ffd-98db-76a67c46caf2","Type":"ContainerStarted","Data":"a45c36cbbb07a1d21b11c79b2a6d24d60aef4d72333dd878fe29476bf8c4ce99"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.515659 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.535782 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.551276 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.567969 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.573913 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.573959 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.573972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.573989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.574001 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.581446 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.593583 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.610825 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.625368 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.641312 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.652770 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.670150 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.677022 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.677070 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.677084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.677106 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.677122 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.684045 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.697342 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.709331 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.725290 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.739047 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.753414 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.771340 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.780064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.780135 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.780159 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.780189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.780214 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.786196 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.799727 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.815905 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.835099 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.853820 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.874572 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.882861 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.882916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.882936 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.882960 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.882980 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.889061 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.907662 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.939816 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z 
is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.956000 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:25Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.986778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.986829 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.986852 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.986879 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:25 crc kubenswrapper[4784]: I0106 08:15:25.986898 4784 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:25Z","lastTransitionTime":"2026-01-06T08:15:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.090153 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.090235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.090254 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.090310 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.090330 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.194019 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.194089 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.194112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.194141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.194169 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.297015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.297093 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.297114 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.297142 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.297163 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.311365 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.311460 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:26 crc kubenswrapper[4784]: E0106 08:15:26.311638 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.311667 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:26 crc kubenswrapper[4784]: E0106 08:15:26.311809 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:26 crc kubenswrapper[4784]: E0106 08:15:26.311917 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.400134 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.400206 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.400231 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.400265 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.400288 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.503144 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.503210 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.503228 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.503253 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.503271 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.512179 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b" exitCode=0 Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.512241 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.531533 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.572900 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.595995 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 
2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.607096 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.607173 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.607199 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.607232 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.607258 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.614127 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.630099 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.642336 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.658561 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.672153 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.682836 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.699106 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834
100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.709225 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.709268 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.709282 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.709303 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.709318 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.714433 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.730670 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.750499 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.767782 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:26Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.811978 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.812030 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.812044 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.812062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.812077 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.915700 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.915787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.915812 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.915846 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:26 crc kubenswrapper[4784]: I0106 08:15:26.915868 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:26Z","lastTransitionTime":"2026-01-06T08:15:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.018427 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.018478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.018496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.018522 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.018539 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.121107 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.121165 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.121189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.121215 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.121231 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.223476 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.223531 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.223576 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.223606 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.223626 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.326580 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.326625 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.326640 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.326662 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.326681 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.429843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.429877 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.429887 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.429901 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.429913 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.520351 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051" exitCode=0 Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.520460 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.531760 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.533141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.533221 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.533238 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.533264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.533278 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.549897 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.570840 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.589499 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.620520 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636195 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636270 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636295 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636323 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636347 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.636588 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.662538 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.678667 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.699519 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.718185 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.731621 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.742198 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.742239 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.742251 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.742269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.742280 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.751663 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.765015 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.785063 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.796579 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:27Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.845716 4784 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.845756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.845764 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.845778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.845787 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.949285 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.949345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.949356 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.949375 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:27 crc kubenswrapper[4784]: I0106 08:15:27.949394 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:27Z","lastTransitionTime":"2026-01-06T08:15:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.052477 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.052520 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.052536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.052578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.052593 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.151835 4784 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.158067 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.158105 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.158117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.158136 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.158152 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.260332 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.260418 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.260478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.260507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.260525 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.315711 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:28 crc kubenswrapper[4784]: E0106 08:15:28.315880 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.316424 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:28 crc kubenswrapper[4784]: E0106 08:15:28.316523 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.316644 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:28 crc kubenswrapper[4784]: E0106 08:15:28.316803 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.362842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.362903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.362918 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.362940 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.362954 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.386637 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a034
75f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.402725 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.417964 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.435100 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.453408 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.465287 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.465345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.465363 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.465387 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.465405 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.471954 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.488703 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.502837 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.517280 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.528976 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.545453 4784 generic.go:334] "Generic (PLEG): container finished" podID="35dd8181-ce20-4a99-a883-84811f75e0a6" containerID="3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1" exitCode=0 Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.545506 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerDied","Data":"3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.550311 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.569460 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.569487 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.569495 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.569507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.569516 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.571991 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.586808 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.603911 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.613462 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.632036 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.647135 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 
2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.663169 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.672204 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.672236 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.672248 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.672264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.672274 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.678526 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.691665 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.706927 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.722271 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.736752 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.754047 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.767677 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.776774 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.776822 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.776842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.776868 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.776887 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.783059 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.804250 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed
80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.818983 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.880534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.880613 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.880626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.880646 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.880659 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.984162 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.984223 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.984241 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.984266 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:28 crc kubenswrapper[4784]: I0106 08:15:28.984286 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:28Z","lastTransitionTime":"2026-01-06T08:15:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.087083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.087150 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.087169 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.087194 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.087215 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.190352 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.190413 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.190434 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.190792 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.190820 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.294797 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.294862 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.294879 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.294904 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.294947 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.391045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.391370 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.391588 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.391811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.391982 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.414989 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.419997 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.420050 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.420068 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.420091 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.420111 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.443279 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.450265 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.450336 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.450356 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.450386 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.450406 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.471208 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.477029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.477174 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.477276 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.477410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.477496 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.500412 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.506006 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.506043 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.506054 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.506073 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.506084 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.527290 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: E0106 08:15:29.527450 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.533229 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.533299 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.533313 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.533333 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.533350 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.565087 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" event={"ID":"35dd8181-ce20-4a99-a883-84811f75e0a6","Type":"ContainerStarted","Data":"2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.591755 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.622407 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.637125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.637172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.637185 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.637202 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.637214 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.639164 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.660129 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z 
is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.670656 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.682312 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.696127 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.709179 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.722936 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.734905 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.740013 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.740041 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.740051 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.740064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.740074 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.747822 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.770991 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.790484 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.804980 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.843205 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.843254 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.843269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.843289 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.843303 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.945655 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.945706 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.945715 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.945730 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:29 crc kubenswrapper[4784]: I0106 08:15:29.945741 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:29Z","lastTransitionTime":"2026-01-06T08:15:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.049872 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.049921 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.049933 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.049948 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.049957 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.152842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.152892 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.152903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.152921 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.152933 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.256768 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.256825 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.256849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.256877 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.256906 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.312225 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.312276 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:30 crc kubenswrapper[4784]: E0106 08:15:30.312495 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:30 crc kubenswrapper[4784]: E0106 08:15:30.312643 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.313062 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:30 crc kubenswrapper[4784]: E0106 08:15:30.313443 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.360033 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.360112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.360132 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.360158 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.360177 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.463126 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.463180 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.463192 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.463212 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.463225 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.566971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.567027 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.567038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.567055 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.567066 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.579707 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.605853 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes
.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.631509 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.654064 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.670228 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.670572 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.670676 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.670794 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.670902 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.678282 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.698387 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.712759 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.734779 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.753345 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.770810 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.774028 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.774062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.774078 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.774101 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.774116 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.803858 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95
fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.821815 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.841866 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.857852 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.874313 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:30Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.876870 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.876907 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.876917 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.876936 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.876949 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.881752 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.979409 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.979718 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.979812 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.979895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:30 crc kubenswrapper[4784]: I0106 08:15:30.979976 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:30Z","lastTransitionTime":"2026-01-06T08:15:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.082901 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.082961 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.082977 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.083001 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.083018 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.185697 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.185787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.185805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.185882 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.185902 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.289578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.289635 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.289650 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.289678 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.289691 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.391586 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.391615 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.391623 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.391635 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.391644 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.494869 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.494912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.494923 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.494953 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.494968 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.582847 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.582957 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.597682 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.597718 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.597727 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.597741 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.597751 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.608219 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.611494 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.620051 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.636984 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.649341 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.661342 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.677901 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.690025 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.699798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.699836 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.699845 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.699863 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.699873 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.703532 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.718336 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.730780 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.754270 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\
":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.764445 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.776458 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.786823 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.798373 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.801956 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.801981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.801991 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.802005 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.802013 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.825428 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 
08:15:31.836585 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.847495 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.863170 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.878815 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.891010 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904694 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904823 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904872 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904889 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.904930 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:31Z","lastTransitionTime":"2026-01-06T08:15:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.920588 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.932732 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.946258 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.965144 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:31 crc kubenswrapper[4784]: I0106 08:15:31.985980 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.002595 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.002738 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:32 crc 
kubenswrapper[4784]: I0106 08:15:32.002771 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.002851 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:15:48.002813253 +0000 UTC m=+50.048986130 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.002874 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.002975 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:48.002956957 +0000 UTC m=+50.049129794 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.003011 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.003186 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:48.003146893 +0000 UTC m=+50.049319770 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.007177 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.007218 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.007231 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.007248 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.007262 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.008594 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.024246 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.103933 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.104094 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104166 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104202 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104217 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104286 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-06 08:15:48.104265149 +0000 UTC m=+50.150437996 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104311 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104349 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104374 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.104463 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:15:48.104433644 +0000 UTC m=+50.150606521 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.109463 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.109522 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.109576 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.109602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.109624 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.212171 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.212203 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.212211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.212223 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.212231 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.275971 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.291348 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.309330 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.311586 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.311639 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.311666 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.311741 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.311935 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:32 crc kubenswrapper[4784]: E0106 08:15:32.312033 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.314913 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.314962 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.314985 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.315014 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.315040 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.324015 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.335421 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.352114 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.370905 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.389882 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.410139 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.418220 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.418269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.418282 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.418298 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.418309 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.426673 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.449297 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.472283 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.492863 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.520398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.520449 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.520459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.520479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.520495 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.521659 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95
fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.536171 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:32Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.624336 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.624391 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.624408 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.624432 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.624449 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.727681 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.727759 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.727779 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.727804 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.727823 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.830509 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.830618 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.830644 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.830674 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.830695 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.933628 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.933668 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.933676 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.933690 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:32 crc kubenswrapper[4784]: I0106 08:15:32.933699 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:32Z","lastTransitionTime":"2026-01-06T08:15:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.036167 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.036264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.036283 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.036314 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.036338 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.138994 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.139026 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.139064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.139083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.139094 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.243226 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.243795 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.243897 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.243972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.244042 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.347392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.347463 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.347481 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.347508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.347525 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.450266 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.450329 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.450346 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.450367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.450381 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.552354 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.552460 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.552474 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.552499 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.552513 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.591956 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/0.log" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.596381 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62" exitCode=1 Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.596457 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.597534 4784 scope.go:117] "RemoveContainer" containerID="758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.609908 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.623143 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.646469 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.655438 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.655486 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.655503 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.655525 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.655576 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.660420 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc 
kubenswrapper[4784]: I0106 08:15:33.677982 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.698383 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd
834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.722085 4784 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.736378 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.753015 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z 
is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.758490 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.758527 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.758536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.758560 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.758570 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.770113 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc47827
4c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' 
detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.782486 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:33Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.864343 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.864388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.864398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.864509 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.864534 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.966787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.966832 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.966845 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.966861 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:33 crc kubenswrapper[4784]: I0106 08:15:33.966871 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:33Z","lastTransitionTime":"2026-01-06T08:15:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.200388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.200445 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.200562 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.200596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.200610 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.215687 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.242335 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 
08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.262327 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.303156 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.303194 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.303211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.303235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.303253 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.314155 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:34 crc kubenswrapper[4784]: E0106 08:15:34.314301 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.314895 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:34 crc kubenswrapper[4784]: E0106 08:15:34.315019 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.317737 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:34 crc kubenswrapper[4784]: E0106 08:15:34.317895 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.405321 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.405403 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.405426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.405454 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.405474 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.508728 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.508786 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.508804 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.508827 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.508847 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.534853 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph"] Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.535658 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.538707 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.539993 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.553043 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.578100 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.596526 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.602926 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/0.log" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.603871 4784 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx86g\" (UniqueName: \"kubernetes.io/projected/a1d667f3-955d-428f-bbba-0e05e712b235-kube-api-access-xx86g\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.603942 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1d667f3-955d-428f-bbba-0e05e712b235-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.604014 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.604086 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.607004 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.607664 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.611846 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.611881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.611894 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.611910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.611923 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.621849 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.640909 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee905701
10c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.661267 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.676865 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 
2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.689991 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.702243 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.704805 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.704847 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.704888 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx86g\" (UniqueName: \"kubernetes.io/projected/a1d667f3-955d-428f-bbba-0e05e712b235-kube-api-access-xx86g\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.704953 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1d667f3-955d-428f-bbba-0e05e712b235-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.705564 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.705694 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1d667f3-955d-428f-bbba-0e05e712b235-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.713155 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1d667f3-955d-428f-bbba-0e05e712b235-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.714209 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.714227 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.714235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.714247 4784 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.714256 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.716106 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.724194 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx86g\" (UniqueName: \"kubernetes.io/projected/a1d667f3-955d-428f-bbba-0e05e712b235-kube-api-access-xx86g\") pod \"ovnkube-control-plane-749d76644c-cf6ph\" (UID: \"a1d667f3-955d-428f-bbba-0e05e712b235\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.732536 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.744627 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.756826 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.782869 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfe
ca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 
08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.801908 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.817083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.817115 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.817125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.817138 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.817147 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.818066 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.831475 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.843258 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.860141 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.864453 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPat
h\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP 
event handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\"
,\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.881781 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: W0106 08:15:34.884697 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1d667f3_955d_428f_bbba_0e05e712b235.slice/crio-16a85f68aa8ff6ae9c338cedad8fb0e70ee1e8a4a2a4446704bb1c417d75e9df WatchSource:0}: Error finding container 16a85f68aa8ff6ae9c338cedad8fb0e70ee1e8a4a2a4446704bb1c417d75e9df: Status 404 returned error can't find the container with id 16a85f68aa8ff6ae9c338cedad8fb0e70ee1e8a4a2a4446704bb1c417d75e9df Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.896467 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.913403 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.920157 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.920217 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.920240 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.920264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.920280 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:34Z","lastTransitionTime":"2026-01-06T08:15:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.929279 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.941006 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.955596 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.973680 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:34 crc kubenswrapper[4784]: I0106 08:15:34.991788 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:34Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.010447 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.023326 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.023389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.023407 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.023433 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.023454 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.032492 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.047845 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.126448 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.126476 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.126484 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.126496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.126504 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.229271 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.229315 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.229326 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.229344 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.229356 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.332724 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.332774 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.332790 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.332808 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.332819 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.436349 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.436410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.436428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.436452 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.436469 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.541974 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.542036 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.542057 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.542080 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.542099 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.614378 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/1.log" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.615459 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/0.log" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.619212 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6" exitCode=1 Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.619289 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.619434 4784 scope.go:117] "RemoveContainer" containerID="758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.620676 4784 scope.go:117] "RemoveContainer" containerID="3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6" Jan 06 08:15:35 crc kubenswrapper[4784]: E0106 08:15:35.621022 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.622723 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" event={"ID":"a1d667f3-955d-428f-bbba-0e05e712b235","Type":"ContainerStarted","Data":"7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.622781 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" event={"ID":"a1d667f3-955d-428f-bbba-0e05e712b235","Type":"ContainerStarted","Data":"16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.622801 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" event={"ID":"a1d667f3-955d-428f-bbba-0e05e712b235","Type":"ContainerStarted","Data":"16a85f68aa8ff6ae9c338cedad8fb0e70ee1e8a4a2a4446704bb1c417d75e9df"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.639475 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.644496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.644579 4784 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.644599 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.644623 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.644640 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.667757 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356
d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.685266 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.701174 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.717146 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.731170 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.744441 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.747313 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.747341 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.747352 4784 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.747365 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.747374 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.756755 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.766504 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.786777 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.801630 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee905701
10c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.815756 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.827602 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.849663 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.849733 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.849756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.849784 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.849809 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.859390 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' 
detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.886725 4784 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.906950 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.916127 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.927462 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.944716 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.951961 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.951990 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.951998 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.952010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.952018 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:35Z","lastTransitionTime":"2026-01-06T08:15:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.958649 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.978414 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:35 crc kubenswrapper[4784]: I0106 08:15:35.989776 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:35Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.006499 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.020273 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.034418 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.055065 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.055092 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.055100 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.055113 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.055122 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.063007 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356
d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.073929 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.087898 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.103429 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.119587 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.157742 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.158052 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.158185 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.158302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.158413 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.260820 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.261089 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.261179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.261271 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.261382 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.312294 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.312328 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.312331 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.312945 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.313101 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.313284 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.364148 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.364180 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.364188 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.364202 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.364211 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.423761 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-xfktc"] Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.424574 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.424695 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.442238 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.462420 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.467181 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.467349 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.467508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.467741 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.467903 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.480711 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.511170 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://758ada1baf51569d1361c09294a477c903706a95fc88f89872ef38271abc8e62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"message\\\":\\\"oller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0106 08:15:32.139817 6099 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0106 08:15:32.139879 6099 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:32.139891 6099 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:32.139935 6099 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0106 08:15:32.139949 6099 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:32.139955 6099 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0106 08:15:32.139968 6099 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0106 08:15:32.139988 6099 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:32.139991 6099 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0106 08:15:32.139998 6099 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0106 08:15:32.140006 6099 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0106 08:15:32.140014 6099 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:32.140067 6099 factory.go:656] Stopping watch factory\\\\nI0106 08:15:32.140085 6099 handler.go:208] Removed *v1.EgressIP event 
handler 8\\\\nI0106 08:15:32.140102 6099 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.524471 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g959\" (UniqueName: \"kubernetes.io/projected/e957a369-1cc7-450b-821f-3ee12341caef-kube-api-access-9g959\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.524741 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.527199 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.540803 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.562992 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.571680 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.571743 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.571762 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.571787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.571807 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.581249 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.597163 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.620865 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.625890 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod 
\"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.625940 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g959\" (UniqueName: \"kubernetes.io/projected/e957a369-1cc7-450b-821f-3ee12341caef-kube-api-access-9g959\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.626154 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.626349 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:15:37.12625122 +0000 UTC m=+39.172424107 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.630627 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/1.log" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.636720 4784 scope.go:117] "RemoveContainer" containerID="3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6" Jan 06 08:15:36 crc kubenswrapper[4784]: E0106 08:15:36.637651 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.639743 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.661257 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g959\" (UniqueName: \"kubernetes.io/projected/e957a369-1cc7-450b-821f-3ee12341caef-kube-api-access-9g959\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.663963 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.675593 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.675658 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.675675 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.675698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.675716 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.683373 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.699652 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.710815 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.723225 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.741324 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.757187 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.774367 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.778588 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.778644 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.778663 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.778690 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.778709 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.794853 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.813506 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.826658 
4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.843246 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.859932 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.873734 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.880908 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.880942 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.880952 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.880968 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.880978 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.887722 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.899235 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc 
kubenswrapper[4784]: I0106 08:15:36.914257 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.931383 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.945979 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.970561 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfe
ca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 
08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mou
ntPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.983091 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.983140 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.983151 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.983168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.983180 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:36Z","lastTransitionTime":"2026-01-06T08:15:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:36 crc kubenswrapper[4784]: I0106 08:15:36.984366 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:36Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.085574 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.085623 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.085636 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.085656 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.085670 4784 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.131184 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:37 crc kubenswrapper[4784]: E0106 08:15:37.131364 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:37 crc kubenswrapper[4784]: E0106 08:15:37.131416 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:15:38.131400084 +0000 UTC m=+40.177572921 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.188915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.189295 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.189455 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.189661 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.189821 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.292959 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.293229 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.293388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.293536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.293738 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.397810 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.398164 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.398374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.398626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.398841 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.506302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.506354 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.506372 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.506394 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.506410 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.636277 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.636321 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.636337 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.636359 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.636376 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.739829 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.739886 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.739910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.739938 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.739962 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.842586 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.842626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.842638 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.842654 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.842666 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.945579 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.945643 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.945667 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.945699 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:37 crc kubenswrapper[4784]: I0106 08:15:37.945725 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:37Z","lastTransitionTime":"2026-01-06T08:15:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.049045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.049124 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.049147 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.049179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.049203 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.141336 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.141447 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.141505 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:15:40.141490043 +0000 UTC m=+42.187662870 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.151832 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.151871 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.151880 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.151894 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.151904 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.254509 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.254611 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.254639 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.254666 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.254692 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.311436 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.311441 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.311612 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.311708 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.311824 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.312010 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.312141 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:38 crc kubenswrapper[4784]: E0106 08:15:38.312268 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.347531 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.357645 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.357720 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.357743 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.357774 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.357795 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.365469 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.384988 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.407621 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.429219 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.445738 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.460734 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.460765 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.460781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.460840 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.460855 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.464297 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.493805 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.517919 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.536459 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.561605 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.563264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.563305 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.563318 4784 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.563336 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.563347 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.575353 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.588844 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.604652 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.618137 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.633090 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.666279 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.666317 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.666329 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.666344 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.666356 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.768771 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.768819 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.768839 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.768862 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.768878 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.872225 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.872294 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.872316 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.872345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.872368 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.975329 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.975396 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.975421 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.975449 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:38 crc kubenswrapper[4784]: I0106 08:15:38.975470 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:38Z","lastTransitionTime":"2026-01-06T08:15:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.078108 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.078168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.078187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.078212 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.078228 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.180778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.180825 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.180843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.180859 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.180871 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.283537 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.283619 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.283631 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.283647 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.283658 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.387081 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.387161 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.387183 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.387213 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.387235 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.489921 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.489995 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.490029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.490058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.490083 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.593342 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.593388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.593396 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.593410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.593420 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.695483 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.695586 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.695604 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.695626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.695643 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.798791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.798849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.798866 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.798895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.798917 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.855286 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.855356 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.855377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.855404 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.855421 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.876286 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:39Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.881738 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.881814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.881833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.881857 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.881874 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.902249 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:39Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.909750 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.909844 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.909871 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.909906 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.909944 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.934058 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:39Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.940609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.940692 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.940711 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.940767 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.940787 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.961476 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:39Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.966412 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.966478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.966503 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.966535 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.966597 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.986230 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:39Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:39 crc kubenswrapper[4784]: E0106 08:15:39.986498 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.988905 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.988981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.989016 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.989045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:39 crc kubenswrapper[4784]: I0106 08:15:39.989067 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:39Z","lastTransitionTime":"2026-01-06T08:15:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.092317 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.092381 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.092404 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.092431 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.092448 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.163049 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.163301 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.163408 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:15:44.163380626 +0000 UTC m=+46.209553503 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.196008 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.196061 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.196078 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.196099 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.196118 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.299528 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.299630 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.299652 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.299678 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.299695 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.311854 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.312067 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.312804 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.312853 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.312956 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.312874 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.313165 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:40 crc kubenswrapper[4784]: E0106 08:15:40.313287 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.403055 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.403125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.403143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.403169 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.403187 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.506797 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.506877 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.506902 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.506931 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.506952 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.611480 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.611538 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.611594 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.611621 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.611644 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.714882 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.714957 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.714984 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.715009 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.715028 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.818496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.818601 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.818624 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.818652 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.818673 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.921409 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.921489 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.921515 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.921582 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:40 crc kubenswrapper[4784]: I0106 08:15:40.921606 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:40Z","lastTransitionTime":"2026-01-06T08:15:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.024597 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.024673 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.024695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.024725 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.024747 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.127849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.127916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.127940 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.127971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.127995 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.231432 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.232294 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.232328 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.232360 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.232386 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.335710 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.335796 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.335818 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.335843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.335862 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.439428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.439488 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.439507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.439530 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.439580 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.546807 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.547460 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.547491 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.547521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.547539 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.651117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.651184 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.651201 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.651227 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.651251 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.754259 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.754318 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.754336 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.754359 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.754375 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.857462 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.857528 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.857573 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.857596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.857615 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.961005 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.961319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.961495 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.961684 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:41 crc kubenswrapper[4784]: I0106 08:15:41.961841 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:41Z","lastTransitionTime":"2026-01-06T08:15:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.065707 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.066073 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.066254 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.066439 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.066738 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.168977 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.169029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.169039 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.169052 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.169060 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.272628 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.272691 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.272714 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.272742 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.272758 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.311861 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.311928 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:42 crc kubenswrapper[4784]: E0106 08:15:42.312009 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.311873 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.311891 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:42 crc kubenswrapper[4784]: E0106 08:15:42.312124 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:42 crc kubenswrapper[4784]: E0106 08:15:42.312195 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:42 crc kubenswrapper[4784]: E0106 08:15:42.312260 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.375949 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.375981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.375990 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.376003 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.376013 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.478813 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.478866 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.478879 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.478896 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.478911 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.582376 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.582467 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.582492 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.582520 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.582537 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.685708 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.685806 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.685838 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.685874 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.685898 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.789665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.789729 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.789751 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.789779 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.789802 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.893504 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.893591 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.893609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.893634 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:42 crc kubenswrapper[4784]: I0106 08:15:42.893651 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
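The setters.go:603 entries above embed the node's Ready condition as a JSON object. A minimal Go sketch that decodes one of the logged payloads (the local struct type is hypothetical; its field names are copied from the log, not from the upstream v1.NodeCondition definition):

package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the fields serialized in the
// "Node became not ready" entries above (hypothetical local type).
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from the 08:15:42.893651 entry.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:42Z","lastTransitionTime":"2026-01-06T08:15:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}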
Jan 06 08:15:44 crc kubenswrapper[4784]: I0106 08:15:44.209456 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.209732 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.209849 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:15:52.209808704 +0000 UTC m=+54.255981581 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 06 08:15:44 crc kubenswrapper[4784]: I0106 08:15:44.311915 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:44 crc kubenswrapper[4784]: I0106 08:15:44.311958 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:44 crc kubenswrapper[4784]: I0106 08:15:44.312010 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.312151 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:44 crc kubenswrapper[4784]: I0106 08:15:44.312188 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.312344 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.312494 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:44 crc kubenswrapper[4784]: E0106 08:15:44.312663 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.313389 4784 scope.go:117] "RemoveContainer" containerID="3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.678714 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/1.log"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.682805 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049"}
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.683393 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.707457 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.725640 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753131 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753201 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753224 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753241 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:47Z","lastTransitionTime":"2026-01-06T08:15:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.753516 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\
\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.772708 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.793389 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.815694 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.834639 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.852590 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.857723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.857803 4784 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.857828 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.857860 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.857883 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:47Z","lastTransitionTime":"2026-01-06T08:15:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.871993 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.893870 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.910093 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.932853 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfe
ca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 
08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.943781 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.958906 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.960524 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.960626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.960643 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.960670 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.960685 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:47Z","lastTransitionTime":"2026-01-06T08:15:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.973751 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:47 crc kubenswrapper[4784]: I0106 08:15:47.986452 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:47Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.055170 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.055367 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:16:20.055335841 +0000 UTC m=+82.101508708 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.055424 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.055626 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.055675 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.055748 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.055754 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:16:20.055734984 +0000 UTC m=+82.101907851 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.055858 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:16:20.055835117 +0000 UTC m=+82.102007994 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.064727 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.064784 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.064798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.064819 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.064845 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.156248 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.156304 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156437 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156466 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156479 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156437 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156551 4784 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:16:20.156519219 +0000 UTC m=+82.202692056 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156590 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156624 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.156729 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:16:20.156700465 +0000 UTC m=+82.202873342 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.167631 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.167698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.167722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.167753 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.167777 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.270437 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.270518 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.270568 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.270601 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.270628 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.312058 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.312152 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.312207 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.312220 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.312061 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.312329 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.312406 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.312498 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.337794 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1
ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.353817 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.366991 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.373143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.373180 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.373192 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.373208 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.373221 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.389828 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321
cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.407627 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.422455 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.439421 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.452132 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.467904 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.475793 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.475833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc 
kubenswrapper[4784]: I0106 08:15:48.475843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.475857 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.475866 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.481236 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"con
tainerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.493110 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.509150 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.520767 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.537634 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.549649 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.558688 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.578630 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.578660 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.578671 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.578687 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.578698 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.681160 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.681462 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.681595 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.681670 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.681739 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.686858 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/2.log" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.687743 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/1.log" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.690568 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" exitCode=1 Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.690614 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.690649 4784 scope.go:117] "RemoveContainer" containerID="3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.691910 4784 scope.go:117] "RemoveContainer" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" Jan 06 08:15:48 crc kubenswrapper[4784]: E0106 08:15:48.692192 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.707921 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.720962 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.732150 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.750517 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.762137 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.779701 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.784110 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.784177 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.784202 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.784236 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.784281 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.795730 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.808359 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.822386 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.838472 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.853802 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.878821 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.886731 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.886781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.886798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.886820 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.886837 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.895451 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.913264 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.942363 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321
cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b25e120efcb5a4d38563adcdb6a70dbcc7e5356d6035bfe270ce6aab57328f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"message\\\":\\\"=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"1f62a432-33b9-495d-83b2-d1dbe6961325\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0106 08:15:35.150636 6240 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0106 08:15:35.150710 6240 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 
7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.957092 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:48Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.989040 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.989107 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.989132 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.989164 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:48 crc kubenswrapper[4784]: I0106 08:15:48.989189 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:48Z","lastTransitionTime":"2026-01-06T08:15:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.092725 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.092782 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.092799 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.092825 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.092843 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.195241 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.195651 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.195865 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.196051 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.196308 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.299948 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.300010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.300029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.300054 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.300073 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.403372 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.403447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.403470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.403499 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.403522 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.506101 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.506164 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.506183 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.506207 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.506224 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.609686 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.609739 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.609756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.609778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.609796 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.697612 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/2.log" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.704150 4784 scope.go:117] "RemoveContainer" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" Jan 06 08:15:49 crc kubenswrapper[4784]: E0106 08:15:49.704714 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.712846 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.712895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.712911 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.712938 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.712955 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.724278 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.744375 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.763708 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.783325 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.807532 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca374727
6f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.815487 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.815521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.815534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.815578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.815590 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.828709 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.850144 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.868360 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.882800 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.900081 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.916798 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.919126 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.919191 4784 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.919210 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.919233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.919249 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:49Z","lastTransitionTime":"2026-01-06T08:15:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.939709 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.960767 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:49 crc kubenswrapper[4784]: I0106 08:15:49.982021 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:49Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.016194 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfe
ca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 
08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.021796 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.021856 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.021875 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.021900 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.021920 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.034790 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.104770 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.104831 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.104843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.104862 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.104874 4784 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.130644 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.138517 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.138608 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.138628 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.138643 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.138652 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.152187 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.156508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.156556 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.156565 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.156579 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.156590 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.174011 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.179470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.179807 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.179916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.180026 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.180437 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.190978 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.194392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.194513 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.194632 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.194661 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.194671 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.206232 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.206590 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.207935 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.207970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.207980 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.207997 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.208010 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.311464 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.311875 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.312013 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.312141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.312414 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.314254 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.314356 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.314420 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.314256 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.314580 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.315012 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.316955 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:50 crc kubenswrapper[4784]: E0106 08:15:50.317438 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.415340 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.415762 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.415964 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.416151 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.416337 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.519385 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.519746 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.519980 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.520173 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.520463 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.623827 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.623899 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.623912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.623930 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.623942 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.677077 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.691190 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.696525 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.718827 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.726495 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.726577 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.726596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.726617 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.726637 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.739819 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.754031 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.779198 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.798757 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.815265 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.829909 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.830203 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.830293 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.830380 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.830465 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.832346 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.847259 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.864613 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:
34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.879829 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.898083 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.914065 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.933282 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.933335 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.933353 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.933377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.933393 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:50Z","lastTransitionTime":"2026-01-06T08:15:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.934151 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.973706 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 
2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/s
erviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:50 crc kubenswrapper[4784]: I0106 08:15:50.989526 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:50Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.037149 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.037217 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.037235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.037261 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.037284 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.140705 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.140752 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.140761 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.140777 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.140788 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.242931 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.242989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.243002 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.243027 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.243039 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.346625 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.346691 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.346702 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.346725 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.346739 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.449683 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.449723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.449734 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.449751 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.449763 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.552308 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.552363 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.552374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.552393 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.552409 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.656477 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.656520 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.656533 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.656570 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.656585 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.759344 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.759402 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.759426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.759452 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.759474 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.863322 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.863376 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.863389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.863447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.863466 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.966354 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.966426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.966445 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.966473 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:51 crc kubenswrapper[4784]: I0106 08:15:51.966491 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:51Z","lastTransitionTime":"2026-01-06T08:15:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.069734 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.069779 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.069791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.069807 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.069818 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.173287 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.173344 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.173359 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.173379 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.173396 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.275734 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.275815 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.275830 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.275860 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.275879 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.302834 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.303134 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.303330 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:16:08.303286548 +0000 UTC m=+70.349459435 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.311591 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.311613 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.311751 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.311815 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.311618 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.311921 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.312020 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:15:52 crc kubenswrapper[4784]: E0106 08:15:52.312135 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.378718 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.378806 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.378823 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.378842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.378854 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.481994 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.482060 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.482084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.482109 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.482125 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.584818 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.584875 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.584887 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.584902 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.584913 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.688368 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.688447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.688465 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.688489 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.688503 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.791729 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.791791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.791810 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.791833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.791850 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.894647 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.894701 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.894719 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.894741 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.894758 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.997026 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.997074 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.997082 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.997095 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:52 crc kubenswrapper[4784]: I0106 08:15:52.997106 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:52Z","lastTransitionTime":"2026-01-06T08:15:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.100410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.100466 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.100482 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.100505 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.100519 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.204369 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.204852 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.204881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.204910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.204932 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.308008 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.308055 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.308071 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.308099 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.308116 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.412172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.412255 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.412277 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.412302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.412320 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.516159 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.516226 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.516243 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.516264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.516281 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.619092 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.619140 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.619154 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.619174 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.619189 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.722980 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.723054 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.723077 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.723108 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.723132 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.825795 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.825915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.825978 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.826010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.826034 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.928996 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.929062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.929087 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.929117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:53 crc kubenswrapper[4784]: I0106 08:15:53.929141 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:53Z","lastTransitionTime":"2026-01-06T08:15:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.032990 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.033078 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.033101 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.033131 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.033152 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.136457 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.136516 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.136533 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.136590 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.136610 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.240230 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.240298 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.240321 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.240352 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.240374 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.311836 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.311848 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.312196 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.312582 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:54 crc kubenswrapper[4784]: E0106 08:15:54.312573 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:54 crc kubenswrapper[4784]: E0106 08:15:54.312712 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:54 crc kubenswrapper[4784]: E0106 08:15:54.312842 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:15:54 crc kubenswrapper[4784]: E0106 08:15:54.312998 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.343722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.343785 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.343805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.343830 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.343849 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.446690 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.447058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.447237 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.447413 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.447812 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.550480 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.550592 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.550618 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.550651 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.550675 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.653312 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.653351 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.653362 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.653377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.653389 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.755966 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.756022 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.756040 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.756064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.756082 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.859665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.860112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.860298 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.860672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.860928 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.964193 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.964264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.964299 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.964334 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:54 crc kubenswrapper[4784]: I0106 08:15:54.964354 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:54Z","lastTransitionTime":"2026-01-06T08:15:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.067459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.067524 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.067580 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.067614 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.067635 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.171092 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.171401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.171647 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.171813 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.171947 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.274479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.274526 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.274537 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.274569 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.274581 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.377164 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.377211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.377223 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.377241 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.377254 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.480332 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.480395 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.480414 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.480441 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.480459 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.583611 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.583678 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.583697 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.583723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.583741 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.686936 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.686998 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.687016 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.687040 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.687059 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.789876 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.789945 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.789970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.790002 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.790026 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.893236 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.893285 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.893304 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.893327 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.893344 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.996369 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.996421 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.996439 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.996462 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:55 crc kubenswrapper[4784]: I0106 08:15:55.996479 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:55Z","lastTransitionTime":"2026-01-06T08:15:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.100091 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.100143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.100159 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.100182 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.100198 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.203135 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.203199 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.203220 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.203245 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.203263 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.307280 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.308389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.308602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.308805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.309007 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.311794 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.311850 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:56 crc kubenswrapper[4784]: E0106 08:15:56.311991 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.312011 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.311850 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:56 crc kubenswrapper[4784]: E0106 08:15:56.312096 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:15:56 crc kubenswrapper[4784]: E0106 08:15:56.312207 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:56 crc kubenswrapper[4784]: E0106 08:15:56.312348 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.412188 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.412526 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.412775 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.412971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.413162 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.517113 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.517184 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.517205 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.517233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.517253 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.619958 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.620012 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.620029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.620052 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.620079 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.722947 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.723001 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.723017 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.723037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.723051 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.825912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.826317 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.826510 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.826754 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.826958 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.930494 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.930811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.930895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.930979 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:56 crc kubenswrapper[4784]: I0106 08:15:56.931054 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:56Z","lastTransitionTime":"2026-01-06T08:15:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.033987 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.034564 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.034659 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.034737 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.034813 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.138125 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.138172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.138189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.138211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.138228 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.241986 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.242059 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.242072 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.242092 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.242107 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.344292 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.344359 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.344381 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.344407 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.344426 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.447331 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.447388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.447404 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.447436 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.447453 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.550198 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.550249 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.550265 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.550283 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.550299 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.653601 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.653666 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.653689 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.653721 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.653744 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.756808 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.756872 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.756896 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.756925 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.756946 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.859809 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.859843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.859855 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.859871 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.859883 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.963367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.963441 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.963469 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.963497 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:57 crc kubenswrapper[4784]: I0106 08:15:57.963520 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:57Z","lastTransitionTime":"2026-01-06T08:15:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.066970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.067283 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.067432 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.067655 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.067879 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.171201 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.171617 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.171820 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.171971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.172098 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.275442 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.276081 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.276287 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.276461 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.276848 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.311299 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.311386 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.311402 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:15:58 crc kubenswrapper[4784]: E0106 08:15:58.311626 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.311658 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:15:58 crc kubenswrapper[4784]: E0106 08:15:58.311745 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:15:58 crc kubenswrapper[4784]: E0106 08:15:58.311865 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:15:58 crc kubenswrapper[4784]: E0106 08:15:58.311941 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.332084 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380079 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380134 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380153 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380177 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380196 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.380272 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.395711 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.412106 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.427775 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.439263 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.449637 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.459302 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.471993 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484623 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484659 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484670 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484683 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484692 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.484887 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.503036 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.519521 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.536648 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 
08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.552036 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.570435 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588039 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588078 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588088 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588049 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588105 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.588333 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.604905 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:15:58Z is after 2025-08-24T17:21:41Z" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.691219 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.691278 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.691297 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.691320 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.691338 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.794656 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.794721 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.794746 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.794778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.794799 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.897854 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.897903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.897920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.897942 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:58 crc kubenswrapper[4784]: I0106 08:15:58.897958 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:58Z","lastTransitionTime":"2026-01-06T08:15:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.000708 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.000757 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.000774 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.000797 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.000814 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.115015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.115074 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.115090 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.115116 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.115134 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.218203 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.218235 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.218246 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.218260 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.218271 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.320233 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.320277 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.320295 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.320313 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.320329 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.422737 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.422786 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.422798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.422816 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.422829 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.526230 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.526292 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.526306 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.526324 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.526338 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.629596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.629647 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.629665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.629689 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.629707 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.732481 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.732532 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.732562 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.732578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.732590 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.835903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.835987 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.836013 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.836043 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.836067 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.938714 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.938781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.938805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.938831 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:15:59 crc kubenswrapper[4784]: I0106 08:15:59.938851 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:15:59Z","lastTransitionTime":"2026-01-06T08:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.042688 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.042746 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.042768 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.042796 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.042817 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.147200 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.147284 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.147302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.147325 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.147342 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.250450 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.250495 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.250526 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.250565 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.250578 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.266177 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.266227 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.266238 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.266253 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.266264 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.279306 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:00Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.282975 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.283024 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.283033 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.283047 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.283056 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.298854 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:00Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.302884 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.303084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.303229 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.303370 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.303502 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.313497 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.314215 4784 scope.go:117] "RemoveContainer" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.313881 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.314350 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.314454 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.313869 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.314492 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.313905 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.314566 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.314658 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.318129 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:00Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.320864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.320888 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.320896 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.320908 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.320917 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.336064 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:00Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.340371 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.340402 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.340410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.340422 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.340431 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.351908 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:00Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:00 crc kubenswrapper[4784]: E0106 08:16:00.352013 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.353290 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.353353 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.353361 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.353375 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.353384 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.456607 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.456641 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.456649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.456662 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.456670 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.560185 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.560252 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.560269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.560292 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.560309 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.663343 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.663404 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.663426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.663459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.663484 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.765714 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.765787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.765811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.765839 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.765860 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.868189 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.868260 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.868283 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.868311 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.868327 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.970482 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.970536 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.970582 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.970604 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:00 crc kubenswrapper[4784]: I0106 08:16:00.970620 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:00Z","lastTransitionTime":"2026-01-06T08:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.073570 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.073615 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.073627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.073642 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.073653 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.176982 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.177058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.177082 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.177111 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.177133 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.279400 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.279446 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.279460 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.279478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.279491 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.382459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.382507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.382516 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.382530 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.382558 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.484867 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.484920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.484934 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.484948 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.484960 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.587833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.587896 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.587918 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.587949 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.587969 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.690479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.690513 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.690525 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.690560 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.690572 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.793432 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.793472 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.793481 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.793496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.793505 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.896590 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.896638 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.896649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.896664 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:01 crc kubenswrapper[4784]: I0106 08:16:01.896676 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:01Z","lastTransitionTime":"2026-01-06T08:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.000468 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.000527 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.000567 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.000594 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.000609 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.104014 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.104075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.104091 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.104117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.104133 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.208000 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.208086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.208112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.208147 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.208171 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311224 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311272 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311289 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311309 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311322 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.311752 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:02 crc kubenswrapper[4784]: E0106 08:16:02.312176 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.312482 4784 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:02 crc kubenswrapper[4784]: E0106 08:16:02.312952 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.313040 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.313078 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:02 crc kubenswrapper[4784]: E0106 08:16:02.313255 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:02 crc kubenswrapper[4784]: E0106 08:16:02.313425 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.329366 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.414244 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.414281 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.414292 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.414310 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.414322 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.517301 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.517348 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.517359 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.517374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.517386 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.619783 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.619837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.619854 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.619879 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.619896 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.722895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.722952 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.722965 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.722988 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.723004 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.825904 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.825955 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.825965 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.825984 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.825997 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.928569 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.928617 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.928630 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.928649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:02 crc kubenswrapper[4784]: I0106 08:16:02.928661 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:02Z","lastTransitionTime":"2026-01-06T08:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.032002 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.032083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.032094 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.032116 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.032130 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.135368 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.135437 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.135459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.135487 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.135507 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.238310 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.238363 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.238381 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.238405 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.238424 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.341938 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.342041 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.342061 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.342090 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.342180 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.445353 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.445414 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.445428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.445450 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.445466 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.548888 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.548983 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.549010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.549052 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.549076 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.651812 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.651847 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.651856 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.651870 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.651879 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.755113 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.755168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.755187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.755213 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.755229 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.858661 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.858725 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.858736 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.858762 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.858776 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.961218 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.961276 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.961288 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.961305 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:03 crc kubenswrapper[4784]: I0106 08:16:03.961317 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:03Z","lastTransitionTime":"2026-01-06T08:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.064201 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.064246 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.064258 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.064280 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.064293 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.167724 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.167772 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.167782 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.167798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.167808 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.271357 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.271422 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.271435 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.271456 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.271468 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.312220 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.312249 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:04 crc kubenswrapper[4784]: E0106 08:16:04.312433 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.312476 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.312498 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:04 crc kubenswrapper[4784]: E0106 08:16:04.312677 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:04 crc kubenswrapper[4784]: E0106 08:16:04.312761 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:04 crc kubenswrapper[4784]: E0106 08:16:04.312890 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.375303 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.375349 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.375357 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.375371 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.375387 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.478186 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.478276 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.478294 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.478319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.478371 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.580947 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.581029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.581066 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.581084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.581096 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.683799 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.684369 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.684398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.684428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.684447 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.786399 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.786513 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.786538 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.786587 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.786605 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.892665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.892715 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.892814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.892853 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.892874 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.995654 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.995693 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.995703 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.995715 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:04 crc kubenswrapper[4784]: I0106 08:16:04.995726 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:04Z","lastTransitionTime":"2026-01-06T08:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.098377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.098446 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.098470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.098497 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.098514 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.201428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.201465 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.201475 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.201488 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.201498 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.304750 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.304814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.304832 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.304857 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.304874 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.407499 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.407564 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.407577 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.407598 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.407613 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.510386 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.510461 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.510487 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.510516 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.510537 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.613351 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.613402 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.613419 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.613436 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.613447 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.716254 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.716293 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.716302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.716318 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.716328 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.818228 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.818276 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.818290 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.818310 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.818321 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.921045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.921100 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.921112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.921129 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:05 crc kubenswrapper[4784]: I0106 08:16:05.921141 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:05Z","lastTransitionTime":"2026-01-06T08:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.023811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.023863 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.023874 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.023891 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.023905 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.126744 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.126786 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.126798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.126816 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.126830 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.232138 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.232258 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.232269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.232289 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.232299 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.311461 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.311471 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.311657 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:06 crc kubenswrapper[4784]: E0106 08:16:06.311625 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.311476 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:06 crc kubenswrapper[4784]: E0106 08:16:06.311773 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:06 crc kubenswrapper[4784]: E0106 08:16:06.311836 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:06 crc kubenswrapper[4784]: E0106 08:16:06.311922 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.334851 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.334903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.334915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.334933 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.334944 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.437679 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.437750 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.437775 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.437804 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.437825 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.540729 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.540798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.540825 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.540853 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.540873 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.643745 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.643791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.643801 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.643816 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.643824 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.746059 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.746121 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.746143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.746172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.746197 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.849268 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.849337 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.849361 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.849389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.849412 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.952758 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.952800 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.952814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.952839 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:06 crc kubenswrapper[4784]: I0106 08:16:06.952853 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:06Z","lastTransitionTime":"2026-01-06T08:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.055750 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.055790 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.055798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.055811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.055821 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.158452 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.158513 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.158529 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.158578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.158597 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.260905 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.261183 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.261268 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.261350 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.261433 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.364038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.364068 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.364077 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.364089 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.364098 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.466447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.466475 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.466483 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.466496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.466505 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.568957 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.568989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.569001 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.569017 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.569028 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.670793 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.670828 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.670837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.670851 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.670862 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.772318 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.772379 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.772397 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.772903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.772962 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.876641 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.876689 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.876701 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.876722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.876738 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.979420 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.979474 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.979485 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.979500 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:07 crc kubenswrapper[4784]: I0106 08:16:07.979512 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:07Z","lastTransitionTime":"2026-01-06T08:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.082030 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.082116 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.082142 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.082219 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.082246 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.185030 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.185074 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.185086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.185105 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.185117 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.287839 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.287953 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.287977 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.288007 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.288029 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.312716 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.312837 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.312910 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.312855 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.313053 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.313055 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.313117 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.313220 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.313342 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.313625 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:16:08 crc kubenswrapper[4784]: E0106 08:16:08.313738 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:16:40.313701319 +0000 UTC m=+102.359874196 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.323895 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.339093 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.353175 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.366241 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.376051 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.387297 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.390145 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.390216 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.390240 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.390269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.390292 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.401844 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.418840 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.431724 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.446257 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.460732 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 
08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.471574 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.487661 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.492679 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.492729 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.492749 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.492772 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.492790 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.504310 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.521485 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.532325 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.554995 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.568686 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.595457 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.595511 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.595528 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.595574 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.595594 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.697690 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.697721 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.697729 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.697743 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.697751 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.774124 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/0.log" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.774207 4784 generic.go:334] "Generic (PLEG): container finished" podID="85f24cc3-ceca-49ce-b774-32e773e72c1a" containerID="1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e" exitCode=1 Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.774267 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerDied","Data":"1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.775021 4784 scope.go:117] "RemoveContainer" containerID="1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.792262 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.807358 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.807398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.807409 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.807428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.807440 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.824906 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.837732 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.858735 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.876872 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.892176 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.904003 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.914010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.914048 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.914059 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.914074 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.914084 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:08Z","lastTransitionTime":"2026-01-06T08:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.915636 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.934884 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.949024 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.961736 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.976507 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:08 crc kubenswrapper[4784]: I0106 08:16:08.989863 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.000907 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:08Z is after 2025-08-24T17:21:41Z" Jan 06 
08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.012502 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.016431 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.016467 4784 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.016476 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.016489 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.016500 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.025230 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.037113 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.054739 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.118338 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.118380 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.118392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.118407 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.118415 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.220257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.220285 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.220295 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.220309 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.220320 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.321952 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.321989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.322000 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.322015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.322026 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.423967 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.424166 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.424257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.424374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.424474 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.527345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.527393 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.527401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.527419 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.527429 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.630305 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.630352 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.630363 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.630379 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.630388 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.733732 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.733791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.733813 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.733843 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.733864 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.779903 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/0.log" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.779988 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerStarted","Data":"3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.792096 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.809815 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321
cc5d6239ed322dffc13fe049\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.825037 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.837511 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.837586 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.837602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.837627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.837645 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.840351 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.857667 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.871595 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.882539 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.895390 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.906923 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.920811 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.935274 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 
2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.939861 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.939901 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.939913 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.939929 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.939939 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:09Z","lastTransitionTime":"2026-01-06T08:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.947062 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.956074 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.965072 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.973446 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc 
kubenswrapper[4784]: I0106 08:16:09.986015 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:09 crc kubenswrapper[4784]: I0106 08:16:09.998175 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:09Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.010503 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.042625 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.042665 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.042676 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.042692 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.042707 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.144864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.144908 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.144920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.144935 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.144949 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.246938 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.246997 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.247015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.247038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.247058 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.312094 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.312130 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.312332 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.312405 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.312448 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.312611 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.312762 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.312920 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.349353 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.349395 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.349405 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.349422 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.349431 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.451713 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.451759 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.451768 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.451782 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.451792 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.481230 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.481278 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.481288 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.481306 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.481319 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.498089 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.504340 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.504401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.504413 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.504430 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.504444 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.519599 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.524071 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.524108 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.524119 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.524134 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.524144 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.537747 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.542367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.542434 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.542454 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.542479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.542518 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.557154 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.561505 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.561586 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
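The x509 failure above is the root cause of every retry in this log: the network-node-identity webhook's serving certificate expired on 2025-08-24, while the node clock reads 2026-01-06. A minimal Go sketch (not part of the log; the endpoint 127.0.0.1:9743 is taken from the Post URL above, and InsecureSkipVerify is used only so the handshake succeeds long enough to read the certificate) for confirming the validity window from the node:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint copied from the webhook URL in the error above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect only; verification is exactly what fails in the log
	})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("serving certificate is expired, matching the x509 error above")
	}
}

Against this node the expected notAfter is 2025-08-24T17:21:41Z, the deadline quoted verbatim in the error string.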
event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.561609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.561634 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.561656 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.576371 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:10Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:10 crc kubenswrapper[4784]: E0106 08:16:10.576623 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.579630 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
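The "exceeds retry count" line marks the kubelet giving up on this sync attempt after a small fixed number of tries; it starts over on the next node-status interval, which is why the same pair of messages recurs through the rest of the log. A Go sketch of that pattern (the constant name mirrors the kubelet's nodeStatusUpdateRetry; the value 5 is assumed from upstream source, not taken from this log):

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the kubelet's fixed retry budget;
// the exact value is an assumption here.
const nodeStatusUpdateRetry = 5

// updateNodeStatus retries tryUpdate and gives up with the same wording
// the kubelet logs above once the budget is exhausted.
func updateNodeStatus(tryUpdate func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdate(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// Stand-in for the failing PATCH: the webhook rejects every attempt.
	patch := func() error {
		return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
	}
	if err := updateNodeStatus(patch); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}

Because the webhook rejects every PATCH for the same TLS reason, the loop always exhausts its budget.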
event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.579724 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.579745 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.579768 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.579787 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.682620 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.682706 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.682720 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.682747 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.682761 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784696 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784748 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784759 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784791 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784696 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784748 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784759 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.784791 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.887500 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.887599 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.887626 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.887658 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.887682 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.991567 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.991621 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.991636 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.991657 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:10 crc kubenswrapper[4784]: I0106 08:16:10.991672 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:10Z","lastTransitionTime":"2026-01-06T08:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.095430 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.095503 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.095521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.095592 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.095621 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.199367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.199430 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.199442 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.199463 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.199478 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.302309 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.302394 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.302413 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.302440 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.302459 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.405854 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.405930 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.405954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.405981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.405999 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.510360 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.510431 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.510449 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.510485 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.510508 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.613573 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.613635 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.613652 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.613680 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.613697 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.717131 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.717184 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.717198 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.717239 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.717255 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.819941 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.819981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.819989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.820003 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.820014 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.923364 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.923427 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.923443 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.923465 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:11 crc kubenswrapper[4784]: I0106 08:16:11.923482 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:11Z","lastTransitionTime":"2026-01-06T08:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.026980 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.027011 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.027019 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.027031 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.027039 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.131464 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.131541 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.131609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.131651 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.131674 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.234260 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.234300 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.234311 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.234328 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.234339 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.312048 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.312103 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.312121 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.312188 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:12 crc kubenswrapper[4784]: E0106 08:16:12.312286 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:12 crc kubenswrapper[4784]: E0106 08:16:12.312412 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:12 crc kubenswrapper[4784]: E0106 08:16:12.312743 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:12 crc kubenswrapper[4784]: E0106 08:16:12.312857 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.337414 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.337510 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.337528 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.337601 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.337625 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.440106 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.440165 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.440179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.440197 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.440212 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.543658 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.543718 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.543756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.543781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.543798 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.646511 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.646593 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.646606 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.646625 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.646637 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.749392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.749427 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.749437 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.749449 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.749457 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.853002 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.853042 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.853050 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.853065 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.853075 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.956492 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.956596 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.956622 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.956649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:12 crc kubenswrapper[4784]: I0106 08:16:12.956672 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:12Z","lastTransitionTime":"2026-01-06T08:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.059851 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.059919 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.059937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.059964 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.059981 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.162610 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.162672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.162694 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.162723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.162744 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.264971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.265037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.265056 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.265080 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.265097 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.368029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.368092 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.368114 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.368142 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.368164 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.470589 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.470655 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.470672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.470697 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.470714 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.573268 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.573326 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.573349 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.573373 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.573390 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.680397 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.680438 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.680450 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.680464 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.680475 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.783610 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.783672 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.783695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.783723 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.783745 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.886772 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.886840 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.886858 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.886882 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.886900 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.989440 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.989481 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.989493 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.989518 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:13 crc kubenswrapper[4784]: I0106 08:16:13.989528 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:13Z","lastTransitionTime":"2026-01-06T08:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.092593 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.092651 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.092662 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.092678 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.092689 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.195732 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.195815 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.195852 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.195884 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.195908 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.299085 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.299149 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.299173 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.299200 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.299220 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.312140 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.312250 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.312189 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:14 crc kubenswrapper[4784]: E0106 08:16:14.312350 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.312832 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:14 crc kubenswrapper[4784]: E0106 08:16:14.312938 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:14 crc kubenswrapper[4784]: E0106 08:16:14.313233 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.313376 4784 scope.go:117] "RemoveContainer" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" Jan 06 08:16:14 crc kubenswrapper[4784]: E0106 08:16:14.313381 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.402187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.402369 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.402398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.402468 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.402496 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.506322 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.506426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.506450 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.506480 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.506502 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.609657 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.610041 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.610248 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.610496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.610737 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.714739 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.714830 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.714856 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.714892 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.714939 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.817976 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.818042 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.818058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.818082 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.818100 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.921259 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.921333 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.921360 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.921398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:14 crc kubenswrapper[4784]: I0106 08:16:14.921426 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:14Z","lastTransitionTime":"2026-01-06T08:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.023703 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.023756 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.023775 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.023798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.023813 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.126376 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.126420 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.126434 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.126451 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.126462 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.228932 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.228978 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.228989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.229010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.229022 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.331516 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.335505 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.335560 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.335573 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.335589 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.335600 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.437432 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.437479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.437488 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.437502 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.437511 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.541088 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.541136 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.541147 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.541163 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.541174 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.644318 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.644381 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.644398 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.644420 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.644437 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.747893 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.747954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.747975 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.747999 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.748015 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.805360 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/2.log" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.809817 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.846645 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e
9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 
08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.852367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.852452 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.852471 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.852497 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.852515 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.862152 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.878820 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.898116 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.912085 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.932351 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.955291 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.955327 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.955339 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.955356 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.955367 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:15Z","lastTransitionTime":"2026-01-06T08:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.965801 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:15 crc kubenswrapper[4784]: I0106 08:16:15.982325 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:15Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.005239 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.020948 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.038144 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.054631 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.063484 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.063579 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.063607 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.063637 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.063659 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.078191 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.097071 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.109854 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.124104 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 
08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.140950 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.158341 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.173184 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.173259 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.173280 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.173761 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.173811 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.182768 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-
06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.276699 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.277003 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.277093 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.277187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.277283 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.311449 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.311482 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:16 crc kubenswrapper[4784]: E0106 08:16:16.311874 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.311584 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:16 crc kubenswrapper[4784]: E0106 08:16:16.312157 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:16 crc kubenswrapper[4784]: E0106 08:16:16.311921 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.311492 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:16 crc kubenswrapper[4784]: E0106 08:16:16.312764 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.380479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.380578 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.380604 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.380632 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.380653 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.484046 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.484116 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.484138 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.484168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.484193 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.587151 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.587266 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.587290 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.587319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.587345 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.690929 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.691013 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.691037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.691064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.691083 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.794272 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.794345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.794368 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.794401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.794425 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.817159 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/3.log" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.818305 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/2.log" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.823068 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" exitCode=1 Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.823239 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.823500 4784 scope.go:117] "RemoveContainer" containerID="45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.824912 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:16:16 crc kubenswrapper[4784]: E0106 08:16:16.825227 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.844774 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.876385 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e
9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:15Z\\\",\\\"message\\\":\\\"ncer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0106 08:16:15.583037 6827 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 23.97µs\\\\nI0106 08:16:15.583044 6827 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} 
protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durabl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\
\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.893222 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.897347 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.897453 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.897475 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.897627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.897651 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:16Z","lastTransitionTime":"2026-01-06T08:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.913151 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.931175 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.946815 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.965379 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.981647 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:16 crc kubenswrapper[4784]: I0106 08:16:16.996250 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:16Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.001158 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.001219 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.001232 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.001248 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.001281 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.013508 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.033409 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.055106 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.082502 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f
405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.096665 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded4
5e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.105699 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.105784 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.105814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.105866 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.105896 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.117524 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.136485 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc 
kubenswrapper[4784]: I0106 08:16:17.160853 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.180727 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.201844 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:17Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.208986 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.209039 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.209058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.209081 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.209099 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.312272 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.312339 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.312357 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.312381 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.312405 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.415830 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.415881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.415893 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.415910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.415923 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.518409 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.518486 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.518503 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.518532 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.518577 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.621299 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.621357 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.621374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.621395 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.621411 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.724447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.724533 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.724608 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.724634 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.724653 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.827045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.827103 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.827124 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.827149 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.827168 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.830265 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/3.log" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.930023 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.930073 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.930086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.930106 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:17 crc kubenswrapper[4784]: I0106 08:16:17.930120 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:17Z","lastTransitionTime":"2026-01-06T08:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.032970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.033017 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.033035 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.033054 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.033067 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.136395 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.136471 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.136495 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.136530 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.136583 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.239766 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.239806 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.239814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.239847 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.239859 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.312044 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:18 crc kubenswrapper[4784]: E0106 08:16:18.312208 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.312489 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:18 crc kubenswrapper[4784]: E0106 08:16:18.312630 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.312883 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:18 crc kubenswrapper[4784]: E0106 08:16:18.313001 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.313334 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:18 crc kubenswrapper[4784]: E0106 08:16:18.313466 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.339698 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 
08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:15Z\\\",\\\"message\\\":\\\"ncer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0106 08:16:15.583037 6827 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 23.97µs\\\\nI0106 08:16:15.583044 6827 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: 
Durabl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.342984 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.343060 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.343086 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.343118 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.343141 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.356317 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.373626 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.389294 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.401655 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.420124 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446198 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446230 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446242 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.446145 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.459118 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.473623 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.488448 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.502743 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.515856 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.530338 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.548710 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.549401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.549467 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.549487 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.549514 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.549534 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.561845 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.577452 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 
08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.596143 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.617338 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.639192 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:18Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.652777 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.652915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.652970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.653008 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.653031 4784 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.755790 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.755852 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.755870 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.755895 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.755913 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.859075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.859145 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.859162 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.859187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.859208 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.961916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.961990 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.962008 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.962033 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:18 crc kubenswrapper[4784]: I0106 08:16:18.962054 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:18Z","lastTransitionTime":"2026-01-06T08:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.065703 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.065781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.065803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.065830 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.065845 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.168799 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.168875 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.168903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.168933 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.168957 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.274312 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.274454 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.274473 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.274498 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.274516 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.377256 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.377297 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.377307 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.377321 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.377329 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.480774 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.480845 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.480868 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.480903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.481054 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.584347 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.584437 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.584454 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.584478 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.584495 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.687054 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.687113 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.687131 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.687154 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.687175 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.790632 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.790712 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.790748 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.790781 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.790804 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.893963 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.894020 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.894037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.894062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.894080 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.997812 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.997886 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.997911 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.997939 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:19 crc kubenswrapper[4784]: I0106 08:16:19.997960 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:19Z","lastTransitionTime":"2026-01-06T08:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.100803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.100863 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.100880 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.100903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.100919 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.143518 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.143849 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.143923 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.143953 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.143912643 +0000 UTC m=+146.190085520 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.144107 4784 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.144136 4784 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.144250 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.144219763 +0000 UTC m=+146.190392630 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.144411 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.144371537 +0000 UTC m=+146.190544414 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.203720 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.203786 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.203803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.203827 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.203844 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.244777 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.244821 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.244935 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.244950 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.244960 4784 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.244956 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.245000 4784 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.245013 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.244996273 +0000 UTC m=+146.291169110 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.245014 4784 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.245079 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.245059565 +0000 UTC m=+146.291232412 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.307198 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.307240 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.307252 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.307274 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.307287 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.311946 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.311962 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.312038 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.312064 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.312117 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.312194 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.312270 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.312290 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.410782 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.410860 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.410883 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.410913 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.410935 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.514523 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.514640 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.514666 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.514694 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.514717 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.618185 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.618267 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.618291 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.618319 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.618339 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.721014 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.721079 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.721097 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.721121 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.721140 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.792772 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.792837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.792891 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.792913 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.792933 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.814113 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:20Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.819320 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.819423 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.819442 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.819466 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.819482 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.840426 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:20Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.845254 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.845310 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
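Every one of the "Error updating node status, will retry" entries above fails for the same reason, visible at the tail of the error: the node's clock reads 2026-01-06, which is past the 2025-08-24T17:21:41Z expiry of the certificate served by the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743, so the API server rejects the patch. A small Go probe, run on the node, can confirm the certificate window; the endpoint is taken from the log, and InsecureSkipVerify only skips chain validation so the handshake completes and the expired leaf can be read.

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

// Dial the webhook named in the x509 error and print its serving
// certificate's validity window.
func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("handshake failed:", err)
		return
	}
	defer conn.Close()
	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		fmt.Println("no peer certificate presented")
		return
	}
	leaf := certs[0]
	fmt.Printf("subject=%v notBefore=%s notAfter=%s expired=%t\n",
		leaf.Subject,
		leaf.NotBefore.Format(time.RFC3339),
		leaf.NotAfter.Format(time.RFC3339),
		time.Now().After(leaf.NotAfter))
}

Against this cluster the probe should report notAfter=2025-08-24T17:21:41Z and expired=true, matching the error text; until that certificate is rotated (or the node clock corrected), every node-status patch will keep failing identically.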
event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.845330 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.845352 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.845370 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.866136 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:20Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.870864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.870915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
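The NodeNotReady loop keeps re-recording the same condition because /etc/kubernetes/cni/net.d/ still contains no CNI configuration; presumably the CNI provider (OVN-Kubernetes on this CRC node) has not written its config yet. libcni treats a directory as configured once it holds a .conf, .conflist, or .json file, so the readiness test the runtime keeps failing reduces to a directory scan, as in this sketch; the path is taken verbatim from the log message.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Reproduce the check behind "no CNI configuration file in
// /etc/kubernetes/cni/net.d/": look for any usable CNI config file.
func main() {
	const dir = "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("read dir:", err)
		return
	}
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration: NetworkReady stays false, node stays NotReady")
	}
}

As soon as the network provider drops a config file into that directory, the NetworkReady condition flips and the pod sandboxes queued above ("No sandbox for pod can be found") can be created.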
event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.870937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.870961 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.870979 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.890879 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:20Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.895441 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.895497 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.895516 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.895538 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.895579 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.915191 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:20Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:20 crc kubenswrapper[4784]: E0106 08:16:20.915427 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.923491 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.923591 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.923619 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.923648 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:20 crc kubenswrapper[4784]: I0106 08:16:20.923669 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:20Z","lastTransitionTime":"2026-01-06T08:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.026300 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.026438 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.026458 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.026483 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.026500 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.129776 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.129853 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.129920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.129947 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.129964 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.233977 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.234039 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.234058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.234081 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.234100 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.338002 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.338167 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.338192 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.338221 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.338242 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.441741 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.441805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.441826 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.441847 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.441864 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.544827 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.544901 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.544926 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.544954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.544975 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.647858 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.647917 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.647936 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.647964 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.647987 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.751823 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.751884 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.751900 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.751923 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.751945 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.855038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.855102 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.855121 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.855146 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.855165 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.958528 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.958634 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.958666 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.958698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:21 crc kubenswrapper[4784]: I0106 08:16:21.958722 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:21Z","lastTransitionTime":"2026-01-06T08:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.061571 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.061623 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.061639 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.061661 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.061678 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.164686 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.164724 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.164736 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.164753 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.164765 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.267401 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.267477 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.267498 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.267521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.267539 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.312104 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.312178 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:22 crc kubenswrapper[4784]: E0106 08:16:22.312274 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.312374 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.312104 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:22 crc kubenswrapper[4784]: E0106 08:16:22.312618 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:22 crc kubenswrapper[4784]: E0106 08:16:22.312490 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:22 crc kubenswrapper[4784]: E0106 08:16:22.312715 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.369972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.370040 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.370059 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.370085 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.370103 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.472472 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.472534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.472587 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.472618 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.472639 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.575858 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.575922 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.575940 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.575967 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.575985 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.678670 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.678748 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.678775 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.678803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.678825 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.782446 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.782492 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.782508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.782529 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.782577 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.885134 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.885217 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.885242 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.885269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.885291 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.988667 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.988737 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.988754 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.988777 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:22 crc kubenswrapper[4784]: I0106 08:16:22.988794 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:22Z","lastTransitionTime":"2026-01-06T08:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.091867 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.091927 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.091943 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.091970 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.091990 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.209015 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.209065 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.209084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.209119 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.209136 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.312284 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.312647 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.313007 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.313316 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.313647 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.417041 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.417316 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.417405 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.417500 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.417607 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.520911 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.520953 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.520964 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.520981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.520994 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.623627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.623659 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.623678 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.623695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.623707 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.726992 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.727365 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.727742 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.727793 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.727839 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.830502 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.830631 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.830656 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.830686 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.830711 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.933099 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.933148 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.933163 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.933180 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:23 crc kubenswrapper[4784]: I0106 08:16:23.933194 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:23Z","lastTransitionTime":"2026-01-06T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.036420 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.036461 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.036470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.036484 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.036496 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:24Z","lastTransitionTime":"2026-01-06T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.140371 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.140443 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.140467 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.140500 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.140522 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:24Z","lastTransitionTime":"2026-01-06T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.244240 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.244279 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.244287 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.244300 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.244310 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:24Z","lastTransitionTime":"2026-01-06T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.311760 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.311899 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:24 crc kubenswrapper[4784]: E0106 08:16:24.312120 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.312164 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:24 crc kubenswrapper[4784]: I0106 08:16:24.312221 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:24 crc kubenswrapper[4784]: E0106 08:16:24.312295 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:24 crc kubenswrapper[4784]: E0106 08:16:24.312448 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:24 crc kubenswrapper[4784]: E0106 08:16:24.312679 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:25 crc kubenswrapper[4784]: I0106 08:16:25.486143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:25 crc kubenswrapper[4784]: I0106 08:16:25.486210 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:25 crc kubenswrapper[4784]: I0106 08:16:25.486231 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:25 crc kubenswrapper[4784]: I0106 08:16:25.486257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:25 crc kubenswrapper[4784]: I0106 08:16:25.486276 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:25Z","lastTransitionTime":"2026-01-06T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311285 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311295 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311392 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311404 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:26 crc kubenswrapper[4784]: E0106 08:16:26.311573 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:26 crc kubenswrapper[4784]: E0106 08:16:26.311748 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:26 crc kubenswrapper[4784]: E0106 08:16:26.311824 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311897 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:26 crc kubenswrapper[4784]: I0106 08:16:26.311921 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:26Z","lastTransitionTime":"2026-01-06T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:26 crc kubenswrapper[4784]: E0106 08:16:26.311976 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:27 crc kubenswrapper[4784]: I0106 08:16:27.241188 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:27 crc kubenswrapper[4784]: I0106 08:16:27.241236 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:27 crc kubenswrapper[4784]: I0106 08:16:27.241252 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:27 crc kubenswrapper[4784]: I0106 08:16:27.241271 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:27 crc kubenswrapper[4784]: I0106 08:16:27.241287 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:27Z","lastTransitionTime":"2026-01-06T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.168912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.168985 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.169010 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.169037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.169058 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.311722 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.311807 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.311904 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:28 crc kubenswrapper[4784]: E0106 08:16:28.311926 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.311966 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:28 crc kubenswrapper[4784]: E0106 08:16:28.312012 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:28 crc kubenswrapper[4784]: E0106 08:16:28.312088 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:28 crc kubenswrapper[4784]: E0106 08:16:28.312306 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.334385 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.346042 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.358226 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z"
Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.382169 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.394272 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.409733 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.424652 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.438761 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.449915 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.471342 4784 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.477004 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.477058 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.477071 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.477089 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.477101 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.483987 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.494866 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.506857 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 
08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.518577 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.530121 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.542285 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.559123 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45af13f678a9e987511f7bdbc7a75beb02654321cc5d6239ed322dffc13fe049\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:15:48Z\\\",\\\"message\\\":\\\"ing reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.392905 6436 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0106 08:15:48.393026 6436 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.393302 6436 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0106 08:15:48.392004 6436 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0106 08:15:48.393603 6436 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0106 08:15:48.393882 6436 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0106 08:15:48.394386 6436 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0106 08:15:48.394401 6436 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0106 08:15:48.394428 6436 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0106 08:15:48.394448 6436 factory.go:656] Stopping watch factory\\\\nI0106 08:15:48.394462 6436 ovnkube.go:599] Stopped ovnkube\\\\nI0106 08:15:48.394485 6436 handler.go:208] Removed *v1.Node event handler 2\\\\nI0106 08:15:48.394496 6436 handler.go:208] Removed *v1.Node event handler 7\\\\nI0106 08:15:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:15Z\\\",\\\"message\\\":\\\"ncer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} 
name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0106 08:16:15.583037 6827 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 23.97µs\\\\nI0106 08:16:15.583044 6827 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durabl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.571250 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.581702 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.581762 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.581778 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.581799 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.581820 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.591676 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:28Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.685384 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.685456 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.685468 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.685482 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.685491 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.788257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.788293 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.788302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.788315 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.788324 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.891320 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.891386 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.891403 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.891429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.891447 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.994937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.995017 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.995043 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.995075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:28 crc kubenswrapper[4784]: I0106 08:16:28.995098 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:28Z","lastTransitionTime":"2026-01-06T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.099011 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.099126 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.099145 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.099168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.099185 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.202811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.202881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.202901 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.202923 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.202936 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.306083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.306168 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.306191 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.306214 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.306231 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.312476 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:16:29 crc kubenswrapper[4784]: E0106 08:16:29.312696 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.332676 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-
cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.349680 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.368964 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.386180 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.409277 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.409314 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.409324 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.409339 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.409349 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.414559 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:15Z\\\",\\\"message\\\":\\\"ncer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0106 08:16:15.583037 6827 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 23.97µs\\\\nI0106 08:16:15.583044 6827 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durabl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed 
container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.430488 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.448987 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.468359 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.484600 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.508391 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.512742 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.512788 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc 
kubenswrapper[4784]: I0106 08:16:29.512805 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.512828 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.512846 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.526105 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"con
tainerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.551386 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f
405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.564962 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded4
5e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.586630 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.609630 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.615812 
4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.615876 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.615897 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.615923 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.615940 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.626888 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.640511 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.660042 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.674265 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:29Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.718283 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.718334 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.718345 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.718358 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.718368 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.821674 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.821748 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.821766 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.821791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.821811 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.925354 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.925429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.925447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.925470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:29 crc kubenswrapper[4784]: I0106 08:16:29.925488 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:29Z","lastTransitionTime":"2026-01-06T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.029083 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.029137 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.029153 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.029177 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.029194 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.132797 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.132872 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.132890 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.132915 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.132934 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.236698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.236797 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.236817 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.236840 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.236859 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.311328 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.311373 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:30 crc kubenswrapper[4784]: E0106 08:16:30.311588 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.311613 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.311613 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:30 crc kubenswrapper[4784]: E0106 08:16:30.311733 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:30 crc kubenswrapper[4784]: E0106 08:16:30.312272 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:30 crc kubenswrapper[4784]: E0106 08:16:30.312456 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.339831 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.339904 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.339925 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.339956 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.339978 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.442925 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.443011 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.443036 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.443067 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.443091 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.546680 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.546747 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.546769 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.546794 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.546813 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.650373 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.650808 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.650972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.651123 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.651289 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.754814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.754888 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.754900 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.754920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.754931 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.857864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.857925 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.857937 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.857958 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.857973 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.881836 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.882919 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:16:30 crc kubenswrapper[4784]: E0106 08:16:30.883127 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.960641 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.960698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.960713 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.960740 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:30 crc kubenswrapper[4784]: I0106 08:16:30.960755 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:30Z","lastTransitionTime":"2026-01-06T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.014172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.014245 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.014264 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.014288 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.014307 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.035517 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.041439 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.041506 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.041524 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.041590 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.041661 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.059205 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.064872 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.064926 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.064972 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.064997 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.065018 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.085912 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.091332 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.091417 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.091438 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.091471 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.091496 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.109171 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.118505 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.118769 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.118798 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.118837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.118875 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.139640 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f9cdf15e-824d-44fb-aab6-0ddaf0e8ba95\\\",\\\"systemUUID\\\":\\\"4602588f-a4e8-4d03-9d3c-a153f288ba5f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:31Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:31 crc kubenswrapper[4784]: E0106 08:16:31.139856 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.141916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.141966 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.141981 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.142000 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.142014 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.246333 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.246403 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.246426 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.246455 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.246481 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.349639 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.349695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.349712 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.349735 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.349755 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.452695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.452763 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.452787 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.452814 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.452834 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.556356 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.556418 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.556438 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.556459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.556474 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.659969 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.660020 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.660036 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.660062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.660078 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.763715 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.763785 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.763802 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.763826 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.763843 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.867441 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.867502 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.867512 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.867532 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.867585 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.970218 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.970294 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.970318 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.970346 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:31 crc kubenswrapper[4784]: I0106 08:16:31.970369 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:31Z","lastTransitionTime":"2026-01-06T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.074102 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.074144 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.074155 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.074172 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.074187 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.177569 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.177635 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.177655 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.177680 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.177697 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.280049 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.280107 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.280124 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.280146 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.280163 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.311822 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.311850 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.311889 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.311945 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:32 crc kubenswrapper[4784]: E0106 08:16:32.312072 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:32 crc kubenswrapper[4784]: E0106 08:16:32.312336 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:32 crc kubenswrapper[4784]: E0106 08:16:32.312502 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:32 crc kubenswrapper[4784]: E0106 08:16:32.312606 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.382631 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.382697 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.382718 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.382745 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.382766 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.486126 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.486199 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.486218 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.486242 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.486263 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.589641 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.589704 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.589721 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.589744 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.589764 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.693118 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.693176 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.693193 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.693215 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.693231 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.795507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.795582 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.795593 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.795608 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.795618 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.898377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.898435 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.898456 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.898483 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:32 crc kubenswrapper[4784]: I0106 08:16:32.898509 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:32Z","lastTransitionTime":"2026-01-06T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.002075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.002139 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.002163 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.002187 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.002204 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.107026 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.107110 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.107130 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.107159 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.107178 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.209835 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.209894 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.209914 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.209938 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.209958 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.312791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.312849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.312865 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.312889 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.312908 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.415803 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.415871 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.415892 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.415917 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.415934 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.524411 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.524455 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.537406 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.537491 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.537580 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.639920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.639989 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.640011 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.640038 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.640060 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.743370 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.743811 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.744207 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.744355 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.744530 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.848012 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.848084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.848111 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.848141 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.848161 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.950773 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.950837 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.950854 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.950876 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:33 crc kubenswrapper[4784]: I0106 08:16:33.950893 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:33Z","lastTransitionTime":"2026-01-06T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.054058 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.054716 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.055056 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.055282 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.055467 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.158504 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.158575 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.158592 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.158616 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.158633 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.261248 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.261360 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.261388 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.261415 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.261435 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.311464 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:34 crc kubenswrapper[4784]: E0106 08:16:34.311780 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.311839 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:34 crc kubenswrapper[4784]: E0106 08:16:34.312242 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.311900 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:34 crc kubenswrapper[4784]: E0106 08:16:34.312749 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.311848 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:34 crc kubenswrapper[4784]: E0106 08:16:34.313468 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.364288 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.364354 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.364374 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.364397 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.364414 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.467867 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.467934 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.467951 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.467977 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.467995 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.571237 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.571367 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.571445 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.571521 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.571579 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.674684 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.674864 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.674887 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.674916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.674939 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.778616 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.778722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.778741 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.778776 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.778800 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.882429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.882526 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.882569 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.882595 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.882612 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.986045 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.986120 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.986144 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.986173 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:34 crc kubenswrapper[4784]: I0106 08:16:34.986195 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:34Z","lastTransitionTime":"2026-01-06T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.089179 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.089246 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.089259 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.089281 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.089295 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.193078 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.193146 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.193166 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.193194 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.193214 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.295894 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.295954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.295971 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.295996 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.296014 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.399992 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.400035 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.400047 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.400062 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.400073 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.504883 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.504931 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.504942 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.504958 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.504969 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.608340 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.608389 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.608409 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.608429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.608447 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.711473 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.711519 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.711570 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.711602 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.711624 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.814468 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.814524 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.814573 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.814604 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.814625 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.917912 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.917979 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.917996 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.918026 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:35 crc kubenswrapper[4784]: I0106 08:16:35.918049 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:35Z","lastTransitionTime":"2026-01-06T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.021112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.021178 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.021195 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.021219 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.021239 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.124273 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.124332 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.124348 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.124371 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.124387 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.227372 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.227428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.227451 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.227501 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.227526 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.312159 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.312224 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.312224 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:36 crc kubenswrapper[4784]: E0106 08:16:36.312356 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.312440 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:36 crc kubenswrapper[4784]: E0106 08:16:36.312631 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:36 crc kubenswrapper[4784]: E0106 08:16:36.312726 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:36 crc kubenswrapper[4784]: E0106 08:16:36.312833 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.329726 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.329791 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.329810 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.329834 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.329851 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.433428 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.433492 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.433517 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.433588 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.433629 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.536933 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.537000 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.537019 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.537042 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.537061 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.643963 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.644057 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.644072 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.644119 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.644139 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.747470 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.747534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.747574 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.747597 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.747615 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.851104 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.851195 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.851215 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.851239 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.851258 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.954213 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.954258 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.954273 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.954292 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:36 crc kubenswrapper[4784]: I0106 08:16:36.954308 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:36Z","lastTransitionTime":"2026-01-06T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.057682 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.057759 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.057780 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.057808 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.057829 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.161534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.161621 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.161638 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.161664 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.161681 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.265016 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.265075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.265094 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.265117 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.265134 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.367993 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.368063 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.368084 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.368112 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.368133 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.471350 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.471396 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.471410 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.471429 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.471444 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.577508 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.577580 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.577600 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.577624 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.577641 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.681208 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.681279 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.681297 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.681322 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.681340 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.785137 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.785210 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.785228 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.785253 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.785270 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.888751 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.888825 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.888849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.888880 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.888903 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.991143 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.991190 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.991211 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.991234 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:37 crc kubenswrapper[4784]: I0106 08:16:37.991248 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:37Z","lastTransitionTime":"2026-01-06T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.093775 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.093820 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.093832 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.093849 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.093862 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.196860 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.196918 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.196932 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.196954 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.196970 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.300309 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.300439 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.300461 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.300490 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.300510 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.311737 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.311820 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.311857 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:38 crc kubenswrapper[4784]: E0106 08:16:38.312095 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:38 crc kubenswrapper[4784]: E0106 08:16:38.312269 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.312358 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:38 crc kubenswrapper[4784]: E0106 08:16:38.312445 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:38 crc kubenswrapper[4784]: E0106 08:16:38.312507 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.339120 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d655bc643eea21cf8a8a0abab00e85012598ea57eb338d81f1d4046c1de19f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.357082 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c56427bca8fb7be45a41afe39b87155406d2b4c2737d113730623b9adaca3060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c4570e283167262caa54d2d11d302eb65011dee67ea45a7015dee4675d26c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.371870 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-62dc6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"101ca23a-112e-4976-bfec-16a98fcfbd0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bfe8d08452a8fcc1951d9b1a671ef00af1510e2e82c425606c0e22be6220f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6h2db\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-62dc6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.389329 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94b52312-7b54-4df2-ab82-0eb7b01334f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55928a09ffc66f3760615c59cd6ebf56636a3d641e2589d614fc277341681464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85vh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-68nth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.403744 4784 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.403826 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.403841 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.403910 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.403926 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.408917 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-24ksn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35dd8181-ce20-4a99-a883-84811f75e0a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2765e1ce5b3f2096a2d02b2a775927af8fc07aa65f5c4c9066bfdc07b410f333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://916248f2449846eba4b28ce59ce51da892f2d8021287bca61d08b16040a3dc22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://25cd834100b52002b97882d249ce093d4ad6811f35f5ea83ff3b28eeef8b5ede\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8cf8f615659806a4ebaa4624a7fdde1401e7dac3a7dbef2738ece6472002be4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c18701f7b5833c326dd5ccfaceb5f32709bca3747276f5b9a31fb493e52d9d1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92bb678cd3629d621319de380cb83b57b18995a82e24670a0d09d42171b70051\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c7e9013a64393d0f69e98884428ff5113e9edba9b80c8f92a7ce27c0690dbd1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmp8f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-24ksn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.430452 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e91f99f6-1ed1-4187-a372-ad133ddcc5f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://04b04653b06b532fc3a2f8583d160498013506385a0300c9a18179e197d954df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d51bdf0b5bbddbac02c59e998a98dccd33f70201b259c0d714632c1c178cfcf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a340f1a5761b7d5f8c00ea02b5975b7286e522c32ee90570110c14fca8bd9e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.458140 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dff2e41c-116e-419f-93a9-06998bea2b86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f6af050344e221c97333671e485113faccc52b13a5a44f5dcb28d462d56b78d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://89918edb3ee39dd36af490674dfac92a456dd748c1b463912d3ac90dc593d433\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1acd2684da1d9b18f7f4316b3b0d2e4467114c556352fbfac2ee52cdbd690a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0493343d4ae93f1cf27349de7734f1783b5898f
405e99afa96220b1a84e1356e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca141ead88c336c6283de282c5ace12280e66f956d81b84ddf87c78ce26ba36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d35dfaea112bda776f476f94a84d98c7a45e08f2c51036f63a30eb378142020d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703e211b4eefc2eb4b2bb85dfbcf425fcb20e424dcb36af9504ae26d7e9906b6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10656748fb846e4325fb54271358ae5945f8b07e38c2c503d02c08ad1c400907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.477127 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2a74be2-be48-4b42-9c2c-1fd8040c62ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31a7b648a0338258b4719dc2489659cbb9974805adf9f592f4a95aa561433c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ded4
5e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded45e3f154d736950c4b903c139991aef54f914b4457f3ec26a5db64fced60e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.495951 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1d667f3-955d-428f-bbba-0e05e712b235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16dd2d3a265cd1e33a04b505f439d42934e93a0edff73620aaa4fda869a6d31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
},{\\\"containerID\\\":\\\"cri-o://7ebc08aee4049a76f44db681c7e868c6f5be84c005b17b6c530c1e99cfeb13d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xx86g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cf6ph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.507064 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.507110 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.507121 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.507137 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.507150 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.513356 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xfktc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e957a369-1cc7-450b-821f-3ee12341caef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9g959\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xfktc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.535586 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7154710e-52c4-45a9-8a9d-87cf8ea16f5c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0106 08:15:10.830979 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0106 08:15:10.832917 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3791084794/tls.crt::/tmp/serving-cert-3791084794/tls.key\\\\\\\"\\\\nI0106 08:15:16.429169 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0106 08:15:16.431818 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0106 08:15:16.431839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0106 08:15:16.431867 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0106 08:15:16.431873 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0106 08:15:16.439704 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0106 08:15:16.439727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439732 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0106 08:15:16.439739 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0106 08:15:16.439743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0106 08:15:16.439747 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0106 08:15:16.439750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0106 08:15:16.439903 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0106 08:15:16.443871 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.553354 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.572759 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-l2xdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"85f24cc3-ceca-49ce-b774-32e773e72c1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:08Z\\\",\\\"message\\\":\\\"2026-01-06T08:15:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25\\\\n2026-01-06T08:15:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_84811dbb-c05a-4393-9f84-74c216a9ef25 to /host/opt/cni/bin/\\\\n2026-01-06T08:15:23Z [verbose] multus-daemon started\\\\n2026-01-06T08:15:23Z [verbose] Readiness Indicator file check\\\\n2026-01-06T08:16:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:22Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kgcf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-l2xdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.590939 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"289c7f80-774f-4019-a3a9-b6bd325bc76d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cea0c4194d26b1d6e3a19a06f925dcb89e24e2ccacf253f91330141defd12532\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c09368fd00349fc66f7f1e770c5553457be595c44283c8076f38d8fbdf94613\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://86bd77cfbf0d2c7762b7ffae176effd11cb8be214d98b92e4c19d82bdd574621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfcc74cd5db76d1438a483916e06b473bd2294a779328c2021891979036d655\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:14:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:14:59Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:14:58Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.609372 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.609436 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.609446 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.609458 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.609467 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.616138 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"700c7389-9fff-4331-9d37-6af2ff592ac5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:22Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-06T08:16:15Z\\\",\\\"message\\\":\\\"ncer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0106 08:16:15.583037 6827 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 23.97µs\\\\nI0106 08:16:15.583044 6827 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durabl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-06T08:16:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed 
container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-06T08:15:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-06T08:15:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvbtl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-blw4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.631604 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qnthf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1769680a-b6b7-4ffd-98db-76a67c46caf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed7aa6f019f384462f2d6123dbd2f35b8616023322aa137a3da79f02432fb14c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x5p9j\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-06T08:15:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qnthf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.648809 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.668086 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.680901 4784 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-06T08:15:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7e63a806c02cdc9ce339c644f98255c859ad8203f265e47b6700eb0d632379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-06T08:15:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-06T08:16:38Z is after 2025-08-24T17:21:41Z" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.712201 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.712245 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.712257 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.712275 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.712288 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.814752 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.814842 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.814865 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.814897 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.814921 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.919200 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.920105 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.920136 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.920164 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:38 crc kubenswrapper[4784]: I0106 08:16:38.920184 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:38Z","lastTransitionTime":"2026-01-06T08:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.022860 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.022903 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.022916 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.022932 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.022944 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.126601 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.126670 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.126695 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.126728 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.126753 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.230163 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.230258 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.230277 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.230311 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.230336 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.334722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.334817 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.334844 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.334881 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.334909 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.438527 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.438609 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.438624 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.438644 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.438660 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.542361 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.542436 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.542453 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.542479 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.542498 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.646203 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.646269 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.646288 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.646317 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.646335 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.749785 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.749907 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.749930 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.749962 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.749983 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.853649 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.853698 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.853721 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.853750 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.853772 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.957627 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.957722 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.957743 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.957804 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:39 crc kubenswrapper[4784]: I0106 08:16:39.957824 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:39Z","lastTransitionTime":"2026-01-06T08:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.061368 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.061453 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.061473 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.061496 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.061514 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.164952 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.165006 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.165023 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.165043 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.165054 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.268406 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.268459 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.268476 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.268501 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.268519 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.311735 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.311900 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.311751 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.311946 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.311749 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.312123 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef" Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.312255 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.312485 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.371935 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.371987 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.372004 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.372029 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.372047 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.406844 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc" Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.407069 4784 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:16:40 crc kubenswrapper[4784]: E0106 08:16:40.407190 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs podName:e957a369-1cc7-450b-821f-3ee12341caef nodeName:}" failed. No retries permitted until 2026-01-06 08:17:44.40715684 +0000 UTC m=+166.453329727 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs") pod "network-metrics-daemon-xfktc" (UID: "e957a369-1cc7-450b-821f-3ee12341caef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.475302 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.475447 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.475474 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.475507 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.475739 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.578833 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.578884 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.578898 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.578920 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.578931 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.682021 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.682075 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.682093 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.682116 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.682132 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.785458 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.785518 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.785534 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.785588 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.785607 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.888247 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.888329 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.888347 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.888371 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.888389 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.992308 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.992377 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.992392 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.992417 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:40 crc kubenswrapper[4784]: I0106 08:16:40.992435 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:40Z","lastTransitionTime":"2026-01-06T08:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.095795 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.095904 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.095929 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.095969 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.095997 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:41Z","lastTransitionTime":"2026-01-06T08:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.155942 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.156025 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.156037 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.156060 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.156077 4784 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-06T08:16:41Z","lastTransitionTime":"2026-01-06T08:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.227616 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx"] Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.229016 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.234073 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.234151 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.234393 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.234474 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.317294 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.317388 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/363ce36c-181d-48cb-8f5f-d60b5009e713-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.317531 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.317666 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/363ce36c-181d-48cb-8f5f-d60b5009e713-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.317722 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/363ce36c-181d-48cb-8f5f-d60b5009e713-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.356793 4784 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-dns/node-resolver-62dc6" podStartSLOduration=80.356770766 podStartE2EDuration="1m20.356770766s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.356735404 +0000 UTC m=+103.402908281" watchObservedRunningTime="2026-01-06 08:16:41.356770766 +0000 UTC m=+103.402943613" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.396996 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-24ksn" podStartSLOduration=80.39696993 podStartE2EDuration="1m20.39696993s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.394898026 +0000 UTC m=+103.441070893" watchObservedRunningTime="2026-01-06 08:16:41.39696993 +0000 UTC m=+103.443142807" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.397421 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podStartSLOduration=80.397413414 podStartE2EDuration="1m20.397413414s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.371356847 +0000 UTC m=+103.417529694" watchObservedRunningTime="2026-01-06 08:16:41.397413414 +0000 UTC m=+103.443586291" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.418582 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.418664 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/363ce36c-181d-48cb-8f5f-d60b5009e713-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.418702 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/363ce36c-181d-48cb-8f5f-d60b5009e713-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.418790 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.418825 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/363ce36c-181d-48cb-8f5f-d60b5009e713-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.419829 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.419850 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/363ce36c-181d-48cb-8f5f-d60b5009e713-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.420868 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/363ce36c-181d-48cb-8f5f-d60b5009e713-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.425977 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=84.425959368 podStartE2EDuration="1m24.425959368s" podCreationTimestamp="2026-01-06 08:15:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.425833944 +0000 UTC m=+103.472006821" watchObservedRunningTime="2026-01-06 08:16:41.425959368 +0000 UTC m=+103.472132215" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.434000 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/363ce36c-181d-48cb-8f5f-d60b5009e713-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.444665 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/363ce36c-181d-48cb-8f5f-d60b5009e713-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qq7lx\" (UID: \"363ce36c-181d-48cb-8f5f-d60b5009e713\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.475201 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=26.475175012 podStartE2EDuration="26.475175012s" podCreationTimestamp="2026-01-06 08:16:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.47319636 +0000 UTC m=+103.519369237" watchObservedRunningTime="2026-01-06 08:16:41.475175012 +0000 UTC m=+103.521347889" Jan 06 08:16:41 crc 
kubenswrapper[4784]: I0106 08:16:41.487615 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=39.487592456 podStartE2EDuration="39.487592456s" podCreationTimestamp="2026-01-06 08:16:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.487580216 +0000 UTC m=+103.533753113" watchObservedRunningTime="2026-01-06 08:16:41.487592456 +0000 UTC m=+103.533765303" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.539752 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cf6ph" podStartSLOduration=79.53972355 podStartE2EDuration="1m19.53972355s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.522402705 +0000 UTC m=+103.568575552" watchObservedRunningTime="2026-01-06 08:16:41.53972355 +0000 UTC m=+103.585896427" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.552863 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.581702 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=84.581672989 podStartE2EDuration="1m24.581672989s" podCreationTimestamp="2026-01-06 08:15:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.558531433 +0000 UTC m=+103.604704310" watchObservedRunningTime="2026-01-06 08:16:41.581672989 +0000 UTC m=+103.627845856" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.615755 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-l2xdd" podStartSLOduration=80.615721684 podStartE2EDuration="1m20.615721684s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.614891298 +0000 UTC m=+103.661064175" watchObservedRunningTime="2026-01-06 08:16:41.615721684 +0000 UTC m=+103.661894521" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.636727 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=51.636695263 podStartE2EDuration="51.636695263s" podCreationTimestamp="2026-01-06 08:15:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.635759284 +0000 UTC m=+103.681932171" watchObservedRunningTime="2026-01-06 08:16:41.636695263 +0000 UTC m=+103.682868110" Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.691825 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-qnthf" podStartSLOduration=80.69180078 podStartE2EDuration="1m20.69180078s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 
08:16:41.691702406 +0000 UTC m=+103.737875273" watchObservedRunningTime="2026-01-06 08:16:41.69180078 +0000 UTC m=+103.737973617"
Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.931723 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" event={"ID":"363ce36c-181d-48cb-8f5f-d60b5009e713","Type":"ContainerStarted","Data":"bdf4d3dd812c9f918c623f6a7c2fa78eb3a7788f0450180715a35d61ca95d9e4"}
Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.931821 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" event={"ID":"363ce36c-181d-48cb-8f5f-d60b5009e713","Type":"ContainerStarted","Data":"0430dba3e7d8ef20c3caadd6982e3462021370c8b5a733774380a6b7d2656035"}
Jan 06 08:16:41 crc kubenswrapper[4784]: I0106 08:16:41.953745 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qq7lx" podStartSLOduration=80.9537155 podStartE2EDuration="1m20.9537155s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:41.952501382 +0000 UTC m=+103.998674249" watchObservedRunningTime="2026-01-06 08:16:41.9537155 +0000 UTC m=+103.999888377"
Jan 06 08:16:42 crc kubenswrapper[4784]: I0106 08:16:42.311981 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:42 crc kubenswrapper[4784]: I0106 08:16:42.312065 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:42 crc kubenswrapper[4784]: E0106 08:16:42.312172 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:42 crc kubenswrapper[4784]: I0106 08:16:42.312085 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:42 crc kubenswrapper[4784]: E0106 08:16:42.312281 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:42 crc kubenswrapper[4784]: E0106 08:16:42.312409 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:42 crc kubenswrapper[4784]: I0106 08:16:42.312615 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:42 crc kubenswrapper[4784]: E0106 08:16:42.312833 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:44 crc kubenswrapper[4784]: I0106 08:16:44.311400 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:44 crc kubenswrapper[4784]: I0106 08:16:44.311400 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:44 crc kubenswrapper[4784]: I0106 08:16:44.312644 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:44 crc kubenswrapper[4784]: E0106 08:16:44.312841 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:44 crc kubenswrapper[4784]: I0106 08:16:44.313094 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:44 crc kubenswrapper[4784]: E0106 08:16:44.313268 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:44 crc kubenswrapper[4784]: E0106 08:16:44.313197 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:44 crc kubenswrapper[4784]: E0106 08:16:44.313775 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:45 crc kubenswrapper[4784]: I0106 08:16:45.312271 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"
Jan 06 08:16:45 crc kubenswrapper[4784]: E0106 08:16:45.312413 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-blw4q_openshift-ovn-kubernetes(700c7389-9fff-4331-9d37-6af2ff592ac5)\"" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5"
Jan 06 08:16:46 crc kubenswrapper[4784]: I0106 08:16:46.312089 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:46 crc kubenswrapper[4784]: E0106 08:16:46.312267 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:46 crc kubenswrapper[4784]: I0106 08:16:46.312333 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:46 crc kubenswrapper[4784]: E0106 08:16:46.312732 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:46 crc kubenswrapper[4784]: I0106 08:16:46.312968 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:46 crc kubenswrapper[4784]: E0106 08:16:46.313088 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:46 crc kubenswrapper[4784]: I0106 08:16:46.313757 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:46 crc kubenswrapper[4784]: E0106 08:16:46.313999 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:48 crc kubenswrapper[4784]: I0106 08:16:48.312108 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:48 crc kubenswrapper[4784]: I0106 08:16:48.312161 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:48 crc kubenswrapper[4784]: E0106 08:16:48.314041 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:48 crc kubenswrapper[4784]: I0106 08:16:48.314083 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:48 crc kubenswrapper[4784]: E0106 08:16:48.314366 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:48 crc kubenswrapper[4784]: E0106 08:16:48.314696 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:48 crc kubenswrapper[4784]: I0106 08:16:48.315007 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:48 crc kubenswrapper[4784]: E0106 08:16:48.315164 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:50 crc kubenswrapper[4784]: I0106 08:16:50.311469 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:50 crc kubenswrapper[4784]: I0106 08:16:50.311613 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:50 crc kubenswrapper[4784]: E0106 08:16:50.311681 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:50 crc kubenswrapper[4784]: E0106 08:16:50.311797 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:50 crc kubenswrapper[4784]: I0106 08:16:50.311897 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:50 crc kubenswrapper[4784]: E0106 08:16:50.311982 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:50 crc kubenswrapper[4784]: I0106 08:16:50.312057 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:50 crc kubenswrapper[4784]: E0106 08:16:50.312129 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:52 crc kubenswrapper[4784]: I0106 08:16:52.313050 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:52 crc kubenswrapper[4784]: E0106 08:16:52.313660 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:52 crc kubenswrapper[4784]: I0106 08:16:52.313174 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:52 crc kubenswrapper[4784]: I0106 08:16:52.313096 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:52 crc kubenswrapper[4784]: I0106 08:16:52.313414 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:52 crc kubenswrapper[4784]: E0106 08:16:52.317037 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:52 crc kubenswrapper[4784]: E0106 08:16:52.316870 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:52 crc kubenswrapper[4784]: E0106 08:16:52.318291 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.311703 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.311813 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.311823 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:54 crc kubenswrapper[4784]: E0106 08:16:54.312223 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:54 crc kubenswrapper[4784]: E0106 08:16:54.312699 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:54 crc kubenswrapper[4784]: E0106 08:16:54.312799 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.312836 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:54 crc kubenswrapper[4784]: E0106 08:16:54.313012 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.981109 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/1.log"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.982140 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/0.log"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.982217 4784 generic.go:334] "Generic (PLEG): container finished" podID="85f24cc3-ceca-49ce-b774-32e773e72c1a" containerID="3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1" exitCode=1
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.982262 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerDied","Data":"3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1"}
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.982316 4784 scope.go:117] "RemoveContainer" containerID="1e5ef6d86770c86ddc9b0fcc45ce5b8d4bca464b759f00f105505bd6adfbc11e"
Jan 06 08:16:54 crc kubenswrapper[4784]: I0106 08:16:54.983612 4784 scope.go:117] "RemoveContainer" containerID="3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1"
Jan 06 08:16:54 crc kubenswrapper[4784]: E0106 08:16:54.983914 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-l2xdd_openshift-multus(85f24cc3-ceca-49ce-b774-32e773e72c1a)\"" pod="openshift-multus/multus-l2xdd" podUID="85f24cc3-ceca-49ce-b774-32e773e72c1a"
Jan 06 08:16:55 crc kubenswrapper[4784]: I0106 08:16:55.988618 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/1.log"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.311981 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.312032 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.312058 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:56 crc kubenswrapper[4784]: E0106 08:16:56.312244 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:56 crc kubenswrapper[4784]: E0106 08:16:56.312641 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.312032 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:56 crc kubenswrapper[4784]: E0106 08:16:56.312871 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:56 crc kubenswrapper[4784]: E0106 08:16:56.313065 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.314041 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.994321 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/3.log"
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.996397 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerStarted","Data":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"}
Jan 06 08:16:56 crc kubenswrapper[4784]: I0106 08:16:56.997954 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q"
Jan 06 08:16:57 crc kubenswrapper[4784]: I0106 08:16:57.042083 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podStartSLOduration=96.042066892 podStartE2EDuration="1m36.042066892s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:16:57.040614317 +0000 UTC m=+119.086787174" watchObservedRunningTime="2026-01-06 08:16:57.042066892 +0000 UTC m=+119.088239719"
Jan 06 08:16:57 crc kubenswrapper[4784]: I0106 08:16:57.329231 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-xfktc"]
Jan 06 08:16:57 crc kubenswrapper[4784]: I0106 08:16:57.329406 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:57 crc kubenswrapper[4784]: E0106 08:16:57.329582 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:16:58 crc kubenswrapper[4784]: E0106 08:16:58.259470 4784 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Jan 06 08:16:58 crc kubenswrapper[4784]: I0106 08:16:58.311954 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:16:58 crc kubenswrapper[4784]: I0106 08:16:58.312019 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:16:58 crc kubenswrapper[4784]: E0106 08:16:58.313847 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:16:58 crc kubenswrapper[4784]: I0106 08:16:58.313877 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:16:58 crc kubenswrapper[4784]: E0106 08:16:58.313959 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:16:58 crc kubenswrapper[4784]: E0106 08:16:58.314043 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:16:58 crc kubenswrapper[4784]: E0106 08:16:58.417051 4784 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 06 08:16:59 crc kubenswrapper[4784]: I0106 08:16:59.311464 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:16:59 crc kubenswrapper[4784]: E0106 08:16:59.311618 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:00 crc kubenswrapper[4784]: I0106 08:17:00.311833 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:00 crc kubenswrapper[4784]: I0106 08:17:00.311917 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:00 crc kubenswrapper[4784]: I0106 08:17:00.311846 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:00 crc kubenswrapper[4784]: E0106 08:17:00.312016 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:00 crc kubenswrapper[4784]: E0106 08:17:00.312226 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:00 crc kubenswrapper[4784]: E0106 08:17:00.312358 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:01 crc kubenswrapper[4784]: I0106 08:17:01.311774 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:01 crc kubenswrapper[4784]: E0106 08:17:01.311931 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:02 crc kubenswrapper[4784]: I0106 08:17:02.311516 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:02 crc kubenswrapper[4784]: I0106 08:17:02.311599 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:02 crc kubenswrapper[4784]: E0106 08:17:02.311731 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:02 crc kubenswrapper[4784]: I0106 08:17:02.311767 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:02 crc kubenswrapper[4784]: E0106 08:17:02.311944 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:02 crc kubenswrapper[4784]: E0106 08:17:02.312072 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:03 crc kubenswrapper[4784]: I0106 08:17:03.311836 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:03 crc kubenswrapper[4784]: E0106 08:17:03.312140 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:03 crc kubenswrapper[4784]: E0106 08:17:03.418372 4784 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 06 08:17:04 crc kubenswrapper[4784]: I0106 08:17:04.311860 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:04 crc kubenswrapper[4784]: I0106 08:17:04.311900 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:04 crc kubenswrapper[4784]: I0106 08:17:04.311951 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:04 crc kubenswrapper[4784]: E0106 08:17:04.311997 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:04 crc kubenswrapper[4784]: E0106 08:17:04.312083 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:04 crc kubenswrapper[4784]: E0106 08:17:04.312150 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:05 crc kubenswrapper[4784]: I0106 08:17:05.311670 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:05 crc kubenswrapper[4784]: E0106 08:17:05.311898 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:06 crc kubenswrapper[4784]: I0106 08:17:06.311534 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:06 crc kubenswrapper[4784]: I0106 08:17:06.311628 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:06 crc kubenswrapper[4784]: E0106 08:17:06.312108 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:06 crc kubenswrapper[4784]: E0106 08:17:06.312253 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:06 crc kubenswrapper[4784]: I0106 08:17:06.311673 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:06 crc kubenswrapper[4784]: E0106 08:17:06.312390 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:07 crc kubenswrapper[4784]: I0106 08:17:07.312269 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:07 crc kubenswrapper[4784]: E0106 08:17:07.312517 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:08 crc kubenswrapper[4784]: I0106 08:17:08.311536 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:08 crc kubenswrapper[4784]: I0106 08:17:08.311639 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:08 crc kubenswrapper[4784]: E0106 08:17:08.312980 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:08 crc kubenswrapper[4784]: I0106 08:17:08.313033 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:08 crc kubenswrapper[4784]: E0106 08:17:08.313207 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:08 crc kubenswrapper[4784]: E0106 08:17:08.313328 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:08 crc kubenswrapper[4784]: E0106 08:17:08.419115 4784 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 06 08:17:09 crc kubenswrapper[4784]: I0106 08:17:09.311598 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:09 crc kubenswrapper[4784]: E0106 08:17:09.311807 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:10 crc kubenswrapper[4784]: I0106 08:17:10.311855 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:10 crc kubenswrapper[4784]: E0106 08:17:10.312033 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:10 crc kubenswrapper[4784]: I0106 08:17:10.312119 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:10 crc kubenswrapper[4784]: I0106 08:17:10.311876 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:10 crc kubenswrapper[4784]: E0106 08:17:10.312522 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:10 crc kubenswrapper[4784]: E0106 08:17:10.312788 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:10 crc kubenswrapper[4784]: I0106 08:17:10.313123 4784 scope.go:117] "RemoveContainer" containerID="3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1"
Jan 06 08:17:11 crc kubenswrapper[4784]: I0106 08:17:11.050014 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/1.log"
Jan 06 08:17:11 crc kubenswrapper[4784]: I0106 08:17:11.050069 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerStarted","Data":"64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6"}
Jan 06 08:17:11 crc kubenswrapper[4784]: I0106 08:17:11.311530 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:11 crc kubenswrapper[4784]: E0106 08:17:11.311780 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:12 crc kubenswrapper[4784]: I0106 08:17:12.314751 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:12 crc kubenswrapper[4784]: E0106 08:17:12.314916 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 06 08:17:12 crc kubenswrapper[4784]: I0106 08:17:12.315047 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:12 crc kubenswrapper[4784]: I0106 08:17:12.315046 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:12 crc kubenswrapper[4784]: E0106 08:17:12.315263 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 06 08:17:12 crc kubenswrapper[4784]: E0106 08:17:12.315382 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 06 08:17:13 crc kubenswrapper[4784]: I0106 08:17:13.312167 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:13 crc kubenswrapper[4784]: E0106 08:17:13.312370 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xfktc" podUID="e957a369-1cc7-450b-821f-3ee12341caef"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.774205 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.775272 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.775489 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.777023 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.780091 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.781743 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.781749 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.785114 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.785386 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 06 08:17:14 crc kubenswrapper[4784]: I0106 08:17:14.785615 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.128024 4784 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.179656 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nm6wl"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.180913 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.180971 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.181884 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.202018 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.202585 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.204075 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.205106 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.205318 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.205684 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.207484 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.227255 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.228924 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.229476 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.230219 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.230668 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.231590 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.231886 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.232087 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.232199 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5q85"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.232785 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.233116 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.233319 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.233967 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.238343 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.238971 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.241533 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.244472 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.245213 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.245342 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246216 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246476 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246698 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246786 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246803 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246878 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.246940 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247019 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247115 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247307 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247318 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247310 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.247422 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.248298 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.253001 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.253623 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.260382 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.261203 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.266264 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.267108 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.268712 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.274895 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.275888 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276286 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276415 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276510 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276600 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276650 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276740 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276755 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276835 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276881 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276945 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.276991 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277055 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277169 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277203 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277314 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277467 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277614 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277733 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277757 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277829 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.277921 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.278004 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.279515 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.279738 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.279902 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.280075 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.280252 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.280411 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.282202 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-fxbll"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.293374 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.296495 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.296764 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.296914 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.297341 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.297871 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.298446 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.301518 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.306827 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.307597 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.308094 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l8w5x"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.308158 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.312631 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fxbll"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.324862 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.326925 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.329663 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-l8w5x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330232 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330261 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330281 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330297 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330313 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName:
\"kubernetes.io/empty-dir/722dd671-6776-4731-8055-795772f78c77-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330328 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330351 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-encryption-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330366 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75e1968d-8eee-46f2-b737-f33e8e48fbfd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330380 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330398 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c0033eba-a627-460c-b782-04628acbadcf-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330426 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330448 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330466 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330490 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330517 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgt7f\" (UniqueName: \"kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330531 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75e1968d-8eee-46f2-b737-f33e8e48fbfd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330561 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6qx9\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330579 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2vcs\" (UniqueName: \"kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330599 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330615 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-serving-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330630 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330647 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330663 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-serving-cert\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330884 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330948 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/722dd671-6776-4731-8055-795772f78c77-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.330974 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331015 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-images\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331065 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331105 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nm6wl\" (UID: 
\"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.331228 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:22.831209598 +0000 UTC m=+144.877382435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331273 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8qbm\" (UniqueName: \"kubernetes.io/projected/2cb40f53-37df-4f44-9bd6-cfb855f08935-kube-api-access-w8qbm\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331313 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331335 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7ns5\" (UniqueName: \"kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331355 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331377 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331417 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-config\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331439 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-client\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331467 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-config\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331487 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit-dir\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331519 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvgdj\" (UniqueName: \"kubernetes.io/projected/722dd671-6776-4731-8055-795772f78c77-kube-api-access-xvgdj\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331586 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-image-import-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331615 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331655 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j46dc\" (UniqueName: \"kubernetes.io/projected/c0033eba-a627-460c-b782-04628acbadcf-kube-api-access-j46dc\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331691 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdfv8\" (UniqueName: \"kubernetes.io/projected/75e1968d-8eee-46f2-b737-f33e8e48fbfd-kube-api-access-mdfv8\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331713 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-node-pullsecrets\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331732 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331797 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331823 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.331842 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.339381 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.339581 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.339790 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.340291 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mthsj"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.340566 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.340742 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.340823 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341195 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341224 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341447 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341682 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341733 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341773 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341991 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.342015 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.341735 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.342067 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.342084 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.342866 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.343881 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.344140 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.344390 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.348063 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jprht"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.348502 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.348605 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.348848 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.349127 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jprht" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.349467 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.349771 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.349948 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350083 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350225 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350324 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350649 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350766 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.350865 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.351511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 06 08:17:22 crc 
kubenswrapper[4784]: I0106 08:17:22.351643 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.352021 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.352191 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.357137 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.359582 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.359746 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.359849 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.359937 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.358586 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.358692 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.358721 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.358787 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.359363 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.364688 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.365046 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.365206 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.368964 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.373078 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.373719 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.377611 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.378291 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.379012 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.380460 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.381543 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-8c87n"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.383457 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.385781 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.390511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.391374 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.393619 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.394201 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.395174 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.396066 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.396514 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.399427 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.399714 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.400224 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.400536 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.403493 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.405432 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-s84bx"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.406307 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.406593 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.406763 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.407580 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.407870 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.408338 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mdssl"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.408465 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.409242 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fxbll"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.409263 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.409310 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.410094 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.411269 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mthsj"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.412813 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5q85"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.413793 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.414798 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.416284 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nm6wl"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.417393 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.418337 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jcbhk"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.419041 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.419503 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.420582 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.421541 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.422814 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.423079 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.423754 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.424963 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.425901 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.426860 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.427802 4784 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.428815 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l8w5x"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.429765 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.430833 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9qntj"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.431780 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-z56qx"] Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.431938 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432282 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.432446 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:22.932416463 +0000 UTC m=+144.978589300 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432498 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432569 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432598 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432625 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75e1968d-8eee-46f2-b737-f33e8e48fbfd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6qx9\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432671 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432719 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-z56qx"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432754 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2vcs\" (UniqueName: \"kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.432794 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:22.932778757 +0000 UTC m=+144.978951594 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432823 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d92vc\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-kube-api-access-d92vc\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432850 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432873 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-serving-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432894 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432921 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432946 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-serving-cert\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.432968 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a595e387-19b3-41f5-9330-3320991a6ca7-proxy-tls\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433002 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km4vk\" (UniqueName: \"kubernetes.io/projected/d9a30f78-3dc1-4289-9560-c385125df7de-kube-api-access-km4vk\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433448 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75e1968d-8eee-46f2-b737-f33e8e48fbfd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433725 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433759 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433787 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/722dd671-6776-4731-8055-795772f78c77-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433836 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-service-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433858 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-images\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433898 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8qbm\" (UniqueName: \"kubernetes.io/projected/2cb40f53-37df-4f44-9bd6-cfb855f08935-kube-api-access-w8qbm\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433894 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433921 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-policies\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433939 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v56d\" (UniqueName: \"kubernetes.io/projected/b1d67235-5081-4c66-acaa-0620c30e170e-kube-api-access-9v56d\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.433892 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-serving-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434134 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434162 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6d40e7-7e34-46bc-9226-307ed8f18a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434216 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-config\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434240 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434264 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/156ba40d-dabf-4f2e-95ce-af4b456b78ac-machine-approver-tls\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434310 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit-dir\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434330 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-client\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434351 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9a30f78-3dc1-4289-9560-c385125df7de-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434374 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlwtv\" (UniqueName: \"kubernetes.io/projected/967f3bd4-283b-4918-b8d9-f0c810321a3d-kube-api-access-qlwtv\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434439 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvgdj\" (UniqueName: \"kubernetes.io/projected/722dd671-6776-4731-8055-795772f78c77-kube-api-access-xvgdj\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434448 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit-dir\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434462 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xf66\" (UniqueName: \"kubernetes.io/projected/e9337b68-6ba6-41fb-8b01-fe8e77a6a051-kube-api-access-7xf66\") pod \"migrator-59844c95c7-qn96z\" (UID: \"e9337b68-6ba6-41fb-8b01-fe8e77a6a051\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434489 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-profile-collector-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434518 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434589 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-serving-cert\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434611 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434661 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-images\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434741 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j46dc\" (UniqueName: \"kubernetes.io/projected/c0033eba-a627-460c-b782-04628acbadcf-kube-api-access-j46dc\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434777 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-dir\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434800 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/593d3e61-46d7-4a61-baba-4b129ad61754-metrics-tls\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434814 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434832 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdfv8\" (UniqueName: \"kubernetes.io/projected/75e1968d-8eee-46f2-b737-f33e8e48fbfd-kube-api-access-mdfv8\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434862 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-node-pullsecrets\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.434971 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/2cb40f53-37df-4f44-9bd6-cfb855f08935-node-pullsecrets\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435037 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435071 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-srv-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435185 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-config\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435222 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9v88\" (UniqueName: \"kubernetes.io/projected/480d6bb7-ba40-4925-bdab-c536df24feb3-kube-api-access-m9v88\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435249 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-stats-auth\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435274 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwnnp\" (UniqueName: \"kubernetes.io/projected/a595e387-19b3-41f5-9330-3320991a6ca7-kube-api-access-xwnnp\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435309 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435343 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435373 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-default-certificate\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435391 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.435792 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.436022 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.436159 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.436830 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75e1968d-8eee-46f2-b737-f33e8e48fbfd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.436886 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.437028 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-encryption-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.437233 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0033eba-a627-460c-b782-04628acbadcf-config\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.438526 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.438763 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.438939 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2wbt\" (UniqueName: \"kubernetes.io/projected/c0c8329e-40b2-4f88-8c03-9405383a057d-kube-api-access-p2wbt\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.438981 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439010 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c8329e-40b2-4f88-8c03-9405383a057d-serving-cert\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439051 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxmd4\" (UniqueName: \"kubernetes.io/projected/1c6d40e7-7e34-46bc-9226-307ed8f18a90-kube-api-access-xxmd4\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439293 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439358 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439425 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgt7f\" (UniqueName: \"kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439454 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-serving-cert\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439477 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c6d40e7-7e34-46bc-9226-307ed8f18a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thhtw\" (UniqueName: \"kubernetes.io/projected/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-kube-api-access-thhtw\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439538 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439595 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439614 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-config\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439640 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c94b199-d756-4ba6-8b21-a491a98cc75b-trusted-ca\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439660 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439684 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439708 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-srv-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439724 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6xk5\" (UniqueName: \"kubernetes.io/projected/62ee35e0-2668-4abf-8984-0da411603434-kube-api-access-l6xk5\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439782 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439845 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-trusted-ca\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439869 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439910 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/480d6bb7-ba40-4925-bdab-c536df24feb3-tmpfs\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.439955 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-client\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440035 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440200 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7pg9\" (UniqueName: \"kubernetes.io/projected/593d3e61-46d7-4a61-baba-4b129ad61754-kube-api-access-w7pg9\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440258 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440285 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-config\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440316 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-config\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440344 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440356 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/722dd671-6776-4731-8055-795772f78c77-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440363 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440420 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440581 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.440802 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75e1968d-8eee-46f2-b737-f33e8e48fbfd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441325 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441400 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441451 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441454 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441479 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpdkx\" (UniqueName: \"kubernetes.io/projected/156ba40d-dabf-4f2e-95ce-af4b456b78ac-kube-api-access-jpdkx\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441512 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7ns5\" (UniqueName: \"kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441668 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441748 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-client\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441787 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c94b199-d756-4ba6-8b21-a491a98cc75b-metrics-tls\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.441833 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-config\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.442115 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-images\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.442180 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-image-import-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.442243 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-audit\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.442288 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1d67235-5081-4c66-acaa-0620c30e170e-service-ca-bundle\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.443975 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-metrics-certs\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444046 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.443984 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-image-import-ca\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444156 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444194 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444260 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444148 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-config\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444388 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444436 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444469 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-encryption-config\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444495 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444527 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444578 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-auth-proxy-config\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444613 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-serving-cert\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.444718 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445107 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445237 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445464 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445476 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445614 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445648 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/722dd671-6776-4731-8055-795772f78c77-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445677 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7njct\" (UniqueName: \"kubernetes.io/projected/0d67a630-478b-447f-8c40-6b26cbbcbe5e-kube-api-access-7njct\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.445708 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.446413 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb6d7\" (UniqueName: \"kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.446459 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c0033eba-a627-460c-b782-04628acbadcf-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.446490 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.447091 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.447125 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.447251 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-etcd-client\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.447446 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/722dd671-6776-4731-8055-795772f78c77-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.448215 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.448433 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.448616 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cb40f53-37df-4f44-9bd6-cfb855f08935-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.453497 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.453644 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2cb40f53-37df-4f44-9bd6-cfb855f08935-encryption-config\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.453918 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jprht"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.455514 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c0033eba-a627-460c-b782-04628acbadcf-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.455931 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.456084 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.457360 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.459268 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.460980 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.463331 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.467194 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.469108 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-s84bx"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.470391 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.471594 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mdssl"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.472872 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.474116 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9qntj"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.475284 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.476706 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.477907 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.479155 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jcbhk"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.480411 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.482583 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.487929 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-z56qx"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.489357 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.490279 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-x7h7q"]
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.491302 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-x7h7q"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.511640 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.523337 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.543316 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547277 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.547472 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.047431648 +0000 UTC m=+145.093604495 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547640 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpdkx\" (UniqueName: \"kubernetes.io/projected/156ba40d-dabf-4f2e-95ce-af4b456b78ac-kube-api-access-jpdkx\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547697 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c94b199-d756-4ba6-8b21-a491a98cc75b-metrics-tls\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547741 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-images\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547794 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1d67235-5081-4c66-acaa-0620c30e170e-service-ca-bundle\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547846 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-metrics-certs\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547890 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547930 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.547979 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548027 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-encryption-config\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548064 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548111 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-auth-proxy-config\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548165 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548211 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7njct\" (UniqueName: \"kubernetes.io/projected/0d67a630-478b-447f-8c40-6b26cbbcbe5e-kube-api-access-7njct\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548247 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548288 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb6d7\" (UniqueName: \"kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548331 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548376 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548465 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548543 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548650 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d92vc\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-kube-api-access-d92vc\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548682 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548724 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a595e387-19b3-41f5-9330-3320991a6ca7-proxy-tls\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548755 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km4vk\" (UniqueName: \"kubernetes.io/projected/d9a30f78-3dc1-4289-9560-c385125df7de-kube-api-access-km4vk\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548785 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548822 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548852 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-service-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548889 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6d40e7-7e34-46bc-9226-307ed8f18a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548913 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-policies\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548939 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v56d\" (UniqueName: \"kubernetes.io/projected/b1d67235-5081-4c66-acaa-0620c30e170e-kube-api-access-9v56d\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548966 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548990 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/156ba40d-dabf-4f2e-95ce-af4b456b78ac-machine-approver-tls\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549016 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-client\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 
08:17:22.549042 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9a30f78-3dc1-4289-9560-c385125df7de-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549068 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlwtv\" (UniqueName: \"kubernetes.io/projected/967f3bd4-283b-4918-b8d9-f0c810321a3d-kube-api-access-qlwtv\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549105 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xf66\" (UniqueName: \"kubernetes.io/projected/e9337b68-6ba6-41fb-8b01-fe8e77a6a051-kube-api-access-7xf66\") pod \"migrator-59844c95c7-qn96z\" (UID: \"e9337b68-6ba6-41fb-8b01-fe8e77a6a051\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549128 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-profile-collector-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549157 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-serving-cert\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549182 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549219 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-dir\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549246 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/593d3e61-46d7-4a61-baba-4b129ad61754-metrics-tls\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549291 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-srv-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549326 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-config\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549356 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9v88\" (UniqueName: \"kubernetes.io/projected/480d6bb7-ba40-4925-bdab-c536df24feb3-kube-api-access-m9v88\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549384 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-stats-auth\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549420 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwnnp\" (UniqueName: \"kubernetes.io/projected/a595e387-19b3-41f5-9330-3320991a6ca7-kube-api-access-xwnnp\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549459 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-default-certificate\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549492 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549592 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549648 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2wbt\" (UniqueName: \"kubernetes.io/projected/c0c8329e-40b2-4f88-8c03-9405383a057d-kube-api-access-p2wbt\") pod \"console-operator-58897d9998-l8w5x\" (UID: 
\"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549678 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c8329e-40b2-4f88-8c03-9405383a057d-serving-cert\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549704 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxmd4\" (UniqueName: \"kubernetes.io/projected/1c6d40e7-7e34-46bc-9226-307ed8f18a90-kube-api-access-xxmd4\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549734 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549818 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c6d40e7-7e34-46bc-9226-307ed8f18a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549856 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-serving-cert\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549865 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549890 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thhtw\" (UniqueName: \"kubernetes.io/projected/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-kube-api-access-thhtw\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549933 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" 
Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549958 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549970 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.549492 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-auth-proxy-config\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.548675 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.550625 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.050586669 +0000 UTC m=+145.096759636 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.551027 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.551265 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3c94b199-d756-4ba6-8b21-a491a98cc75b-metrics-tls\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.552064 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-config\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.552585 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.552822 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.552698 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-policies\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553294 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-service-ca\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553957 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553496 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553528 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553616 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-config\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553858 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c94b199-d756-4ba6-8b21-a491a98cc75b-trusted-ca\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554134 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553443 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-audit-dir\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554164 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-srv-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554216 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6xk5\" (UniqueName: \"kubernetes.io/projected/62ee35e0-2668-4abf-8984-0da411603434-kube-api-access-l6xk5\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:22 crc 
kubenswrapper[4784]: I0106 08:17:22.554273 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554297 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-trusted-ca\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554321 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554396 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/480d6bb7-ba40-4925-bdab-c536df24feb3-tmpfs\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554418 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-client\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554476 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554498 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7pg9\" (UniqueName: \"kubernetes.io/projected/593d3e61-46d7-4a61-baba-4b129ad61754-kube-api-access-w7pg9\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554578 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-config\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.554606 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-config\") pod \"machine-approver-56656f9798-j5jb7\" 
(UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.553250 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555148 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156ba40d-dabf-4f2e-95ce-af4b456b78ac-config\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555210 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555408 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555479 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0c8329e-40b2-4f88-8c03-9405383a057d-trusted-ca\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555844 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d67a630-478b-447f-8c40-6b26cbbcbe5e-config\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555876 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c94b199-d756-4ba6-8b21-a491a98cc75b-trusted-ca\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555880 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.555948 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/480d6bb7-ba40-4925-bdab-c536df24feb3-tmpfs\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.556636 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-serving-cert\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.557005 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-etcd-client\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.557497 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-encryption-config\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.557851 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.558435 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/156ba40d-dabf-4f2e-95ce-af4b456b78ac-machine-approver-tls\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.559006 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.559315 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0d67a630-478b-447f-8c40-6b26cbbcbe5e-etcd-client\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.559655 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-serving-cert\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 
08:17:22.559665 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c8329e-40b2-4f88-8c03-9405383a057d-serving-cert\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.560527 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.568089 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.576124 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.584460 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.603853 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.622906 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.644461 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.655602 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.655721 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.155694754 +0000 UTC m=+145.201867591 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.655788 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.656431 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.156383351 +0000 UTC m=+145.202556188 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.663091 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.683518 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.686631 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9a30f78-3dc1-4289-9560-c385125df7de-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.703861 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.723844 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.735743 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-config\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.743627 4784 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.748115 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.757392 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.757645 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.257612187 +0000 UTC m=+145.303785054 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.757958 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.758486 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.258461299 +0000 UTC m=+145.304634336 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.763618 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.783752 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.803907 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.823227 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.844027 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.860069 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.860400 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.36035303 +0000 UTC m=+145.406526087 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.860867 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.861585 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.361528365 +0000 UTC m=+145.407701242 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.864078 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.883366 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.903622 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.935790 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.944090 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.962175 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.962435 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.462371766 +0000 UTC m=+145.508544603 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.963028 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:22 crc kubenswrapper[4784]: E0106 08:17:22.963529 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.46351967 +0000 UTC m=+145.509692507 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.964349 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.979004 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/593d3e61-46d7-4a61-baba-4b129ad61754-metrics-tls\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht" Jan 06 08:17:22 crc kubenswrapper[4784]: I0106 08:17:22.983668 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.024612 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.044224 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.063839 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.064792 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.065283 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.565250956 +0000 UTC m=+145.611423833 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.065718 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.066119 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.566107028 +0000 UTC m=+145.612279875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.075956 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c6d40e7-7e34-46bc-9226-307ed8f18a90-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.085475 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.104040 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.124596 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.144941 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.152786 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6d40e7-7e34-46bc-9226-307ed8f18a90-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.166290 4784 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.167056 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.167376 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.667343074 +0000 UTC m=+145.713515941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.167656 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.168235 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.668212308 +0000 UTC m=+145.714385185 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.183176 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.204814 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.223809 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.244182 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.264173 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.268950 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.269121 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.769102091 +0000 UTC m=+145.815274938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.269894 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.270228 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.770218403 +0000 UTC m=+145.816391250 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.278268 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a595e387-19b3-41f5-9330-3320991a6ca7-proxy-tls\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.285090 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.289276 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a595e387-19b3-41f5-9330-3320991a6ca7-images\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.303472 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.324131 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.343198 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.356039 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-default-certificate\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.363724 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.370310 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-stats-auth\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.370667 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.370944 4784 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.870912789 +0000 UTC m=+145.917085666 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.371130 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.371754 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.8717277 +0000 UTC m=+145.917900577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.384074 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.393435 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1d67235-5081-4c66-acaa-0620c30e170e-metrics-certs\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.402012 4784 request.go:700] Waited for 1.01780687s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress/configmaps?fieldSelector=metadata.name%3Dservice-ca-bundle&limit=500&resourceVersion=0 Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.404452 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.410047 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b1d67235-5081-4c66-acaa-0620c30e170e-service-ca-bundle\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.423420 4784 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.444661 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.458271 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-srv-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.464367 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.472335 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.473152 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.973111902 +0000 UTC m=+146.019284779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.473250 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.473798 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:23.973770078 +0000 UTC m=+146.019942945 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.483088 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.494756 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-profile-collector-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.499342 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/967f3bd4-283b-4918-b8d9-f0c810321a3d-profile-collector-cert\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.504456 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.523623 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.544147 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.549213 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/62ee35e0-2668-4abf-8984-0da411603434-srv-cert\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.553506 4784 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.553652 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert podName:480d6bb7-ba40-4925-bdab-c536df24feb3 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.053621323 +0000 UTC m=+146.099794200 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert") pod "packageserver-d55dfcdfc-6bjr5" (UID: "480d6bb7-ba40-4925-bdab-c536df24feb3") : failed to sync secret cache: timed out waiting for the condition Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.555761 4784 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.555861 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert podName:480d6bb7-ba40-4925-bdab-c536df24feb3 nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.055829257 +0000 UTC m=+146.102002184 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert") pod "packageserver-d55dfcdfc-6bjr5" (UID: "480d6bb7-ba40-4925-bdab-c536df24feb3") : failed to sync secret cache: timed out waiting for the condition Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.564780 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.573892 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.574066 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.074030886 +0000 UTC m=+146.120203763 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.574708 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.575143 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.075125379 +0000 UTC m=+146.121298256 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.583456 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.603893 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.623578 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.644483 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.663476 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.676064 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.676373 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.176337063 +0000 UTC m=+146.222509940 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.676882 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.677389 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.177366063 +0000 UTC m=+146.223538930 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.684891 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.703782 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.743744 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.765096 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.777921 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.778252 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.278173893 +0000 UTC m=+146.324346740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.779104 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.779636 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.279613558 +0000 UTC m=+146.325786425 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.783901 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.804434 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.823911 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.843434 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.864372 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.880504 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.880726 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.380698848 +0000 UTC m=+146.426871715 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.880858 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.881833 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.381805141 +0000 UTC m=+146.427978018 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.903017 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.906320 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.923815 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.944814 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.965211 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.984935 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 06 08:17:23 crc kubenswrapper[4784]: I0106 08:17:23.987155 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:23 crc kubenswrapper[4784]: E0106 08:17:23.988076 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.488052209 +0000 UTC m=+146.534225086 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.003602 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.024731 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.045253 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.064106 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.084263 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.089281 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.089782 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.090012 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.090442 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.590410648 +0000 UTC m=+146.636583525 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.095427 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-webhook-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.098150 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/480d6bb7-ba40-4925-bdab-c536df24feb3-apiservice-cert\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.104062 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.123639 4784 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.143894 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.164007 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.190836 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.191038 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.691004981 +0000 UTC m=+146.737177858 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.191376 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.191445 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.191658 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.192381 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.692357902 +0000 UTC m=+146.738530779 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.192728 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.197490 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.204853 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.210672 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6qx9\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.224670 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.244382 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.264711 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.293266 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.293630 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.293695 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.293951 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.793921341 +0000 UTC m=+146.840094198 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.298870 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.300052 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.303074 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvgdj\" (UniqueName: \"kubernetes.io/projected/722dd671-6776-4731-8055-795772f78c77-kube-api-access-xvgdj\") pod \"openshift-config-operator-7777fb866f-gr79s\" (UID: \"722dd671-6776-4731-8055-795772f78c77\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.322340 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8qbm\" (UniqueName: \"kubernetes.io/projected/2cb40f53-37df-4f44-9bd6-cfb855f08935-kube-api-access-w8qbm\") pod \"apiserver-76f77b778f-nm6wl\" (UID: \"2cb40f53-37df-4f44-9bd6-cfb855f08935\") " pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.344622 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j46dc\" (UniqueName: \"kubernetes.io/projected/c0033eba-a627-460c-b782-04628acbadcf-kube-api-access-j46dc\") pod \"machine-api-operator-5694c8668f-x5q85\" (UID: \"c0033eba-a627-460c-b782-04628acbadcf\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.359647 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2vcs\" (UniqueName: \"kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs\") pod \"controller-manager-879f6c89f-9tb2r\" (UID: 
\"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.370144 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.394634 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdfv8\" (UniqueName: \"kubernetes.io/projected/75e1968d-8eee-46f2-b737-f33e8e48fbfd-kube-api-access-mdfv8\") pod \"openshift-apiserver-operator-796bbdcf4f-qqglh\" (UID: \"75e1968d-8eee-46f2-b737-f33e8e48fbfd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.395110 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.395496 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:24.89547834 +0000 UTC m=+146.941651187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.395836 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.395899 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.402828 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f9a7a2ab-be43-46e9-a5a6-21c3100a55ef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nb4n9\" (UID: \"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.405420 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.424762 4784 request.go:700] Waited for 1.982573536s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.426610 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.432527 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgt7f\" (UniqueName: \"kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f\") pod \"route-controller-manager-6576b87f9c-2tmmv\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.457829 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7ns5\" (UniqueName: \"kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5\") pod \"console-f9d7485db-q2d7x\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.463976 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.465179 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.473126 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.481740 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.484205 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.487065 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.500094 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.500708 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.000681308 +0000 UTC m=+147.046854165 (durationBeforeRetry 500ms). 
Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.500708 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.000681308 +0000 UTC m=+147.046854165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.503655 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.546306 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpdkx\" (UniqueName: \"kubernetes.io/projected/156ba40d-dabf-4f2e-95ce-af4b456b78ac-kube-api-access-jpdkx\") pod \"machine-approver-56656f9798-j5jb7\" (UID: \"156ba40d-dabf-4f2e-95ce-af4b456b78ac\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.571849 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7njct\" (UniqueName: \"kubernetes.io/projected/0d67a630-478b-447f-8c40-6b26cbbcbe5e-kube-api-access-7njct\") pod \"etcd-operator-b45778765-mthsj\" (UID: \"0d67a630-478b-447f-8c40-6b26cbbcbe5e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.587740 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwnnp\" (UniqueName: \"kubernetes.io/projected/a595e387-19b3-41f5-9330-3320991a6ca7-kube-api-access-xwnnp\") pod \"machine-config-operator-74547568cd-hkq78\" (UID: \"a595e387-19b3-41f5-9330-3320991a6ca7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.588003 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.603767 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thhtw\" (UniqueName: \"kubernetes.io/projected/a3e5f9ba-45cf-41fe-8942-8366faa1ebd1-kube-api-access-thhtw\") pod \"apiserver-7bbb656c7d-wdqc5\" (UID: \"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.607389 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.609554 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.109522526 +0000 UTC m=+147.155695363 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.627010 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb6d7\" (UniqueName: \"kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7\") pod \"oauth-openshift-558db77b4-pln6n\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " pod="openshift-authentication/oauth-openshift-558db77b4-pln6n"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.644132 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlwtv\" (UniqueName: \"kubernetes.io/projected/967f3bd4-283b-4918-b8d9-f0c810321a3d-kube-api-access-qlwtv\") pod \"catalog-operator-68c6474976-r2xv6\" (UID: \"967f3bd4-283b-4918-b8d9-f0c810321a3d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.655272 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.659270 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v56d\" (UniqueName: \"kubernetes.io/projected/b1d67235-5081-4c66-acaa-0620c30e170e-kube-api-access-9v56d\") pod \"router-default-5444994796-8c87n\" (UID: \"b1d67235-5081-4c66-acaa-0620c30e170e\") " pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.681739 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xf66\" (UniqueName: \"kubernetes.io/projected/e9337b68-6ba6-41fb-8b01-fe8e77a6a051-kube-api-access-7xf66\") pod \"migrator-59844c95c7-qn96z\" (UID: \"e9337b68-6ba6-41fb-8b01-fe8e77a6a051\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.699333 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxmd4\" (UniqueName: \"kubernetes.io/projected/1c6d40e7-7e34-46bc-9226-307ed8f18a90-kube-api-access-xxmd4\") pod \"kube-storage-version-migrator-operator-b67b599dd-9spjg\" (UID: \"1c6d40e7-7e34-46bc-9226-307ed8f18a90\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.705687 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.710098 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.710461 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.21044293 +0000 UTC m=+147.256615767 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.714099 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-8c87n"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.717995 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d92vc\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-kube-api-access-d92vc\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.727205 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.733960 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"
Need to start a new one" pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.766393 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9v88\" (UniqueName: \"kubernetes.io/projected/480d6bb7-ba40-4925-bdab-c536df24feb3-kube-api-access-m9v88\") pod \"packageserver-d55dfcdfc-6bjr5\" (UID: \"480d6bb7-ba40-4925-bdab-c536df24feb3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.784820 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21ac7bdf-1788-4bc1-b777-5eb6290b4fb8-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pcmvk\" (UID: \"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.801261 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.802320 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km4vk\" (UniqueName: \"kubernetes.io/projected/d9a30f78-3dc1-4289-9560-c385125df7de-kube-api-access-km4vk\") pod \"control-plane-machine-set-operator-78cbb6b69f-qpchx\" (UID: \"d9a30f78-3dc1-4289-9560-c385125df7de\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.805893 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.811249 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2wbt\" (UniqueName: \"kubernetes.io/projected/c0c8329e-40b2-4f88-8c03-9405383a057d-kube-api-access-p2wbt\") pod \"console-operator-58897d9998-l8w5x\" (UID: \"c0c8329e-40b2-4f88-8c03-9405383a057d\") " pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.811460 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.811915 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.311739949 +0000 UTC m=+147.357912786 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.814001 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.817438 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c94b199-d756-4ba6-8b21-a491a98cc75b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-l4z4z\" (UID: \"3c94b199-d756-4ba6-8b21-a491a98cc75b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.827019 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.846905 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6xk5\" (UniqueName: \"kubernetes.io/projected/62ee35e0-2668-4abf-8984-0da411603434-kube-api-access-l6xk5\") pod \"olm-operator-6b444d44fb-z2j87\" (UID: \"62ee35e0-2668-4abf-8984-0da411603434\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.867445 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7pg9\" (UniqueName: \"kubernetes.io/projected/593d3e61-46d7-4a61-baba-4b129ad61754-kube-api-access-w7pg9\") pod \"dns-operator-744455d44c-jprht\" (UID: \"593d3e61-46d7-4a61-baba-4b129ad61754\") " pod="openshift-dns-operator/dns-operator-744455d44c-jprht" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.901369 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5q85"] Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.904792 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.914087 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916175 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.916772 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.416739079 +0000 UTC m=+147.462911916 (durationBeforeRetry 500ms). 
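Each "No sandbox for pod can be found. Need to start a new one" line above means the kubelet found no live sandbox for that pod in the container runtime and is about to request one from CRI-O over the CRI. In outline that call looks like the sketch below, written against the public k8s.io/cri-api bindings; the socket path and pod metadata are illustrative values, not taken from the kubelet's internals:

// Minimal sketch of the CRI RunPodSandbox call that follows a
// "No sandbox for pod can be found" decision. The runtime (CRI-O here)
// creates the pause/sandbox environment and returns its ID.
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func runSandbox(ctx context.Context) (string, error) {
	// Assumed CRI-O socket path; the kubelet is configured with this endpoint.
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return "", err
	}
	defer conn.Close()

	client := runtimeapi.NewRuntimeServiceClient(conn)
	resp, err := client.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{
		Config: &runtimeapi.PodSandboxConfig{
			Metadata: &runtimeapi.PodSandboxMetadata{
				Name:      "apiserver-76f77b778f-nm6wl", // example pod from this log
				Namespace: "openshift-apiserver",
				Uid:       "2cb40f53-37df-4f44-9bd6-cfb855f08935",
			},
		},
	})
	if err != nil {
		return "", err
	}
	return resp.PodSandboxId, nil
}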
Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.916772 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.416739079 +0000 UTC m=+147.462911916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916831 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93589e54-9797-4be3-8daf-52530390571d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916857 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpxbn\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-kube-api-access-zpxbn\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916923 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgh72\" (UniqueName: \"kubernetes.io/projected/f114d139-4120-49ed-a3a2-bf85c2cb3a84-kube-api-access-bgh72\") pod \"downloads-7954f5f757-fxbll\" (UID: \"f114d139-4120-49ed-a3a2-bf85c2cb3a84\") " pod="openshift-console/downloads-7954f5f757-fxbll"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916939 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508a53a9-5d09-4f87-a763-4ddaac552f24-config\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916958 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fa124c5-65e7-4744-867f-734ef10ca7c2-serving-cert\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.916975 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7787a3d-2716-4923-8b48-9dd09b459af9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917002 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-service-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917024 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-config\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917088 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd695\" (UniqueName: \"kubernetes.io/projected/9fa124c5-65e7-4744-867f-734ef10ca7c2-kube-api-access-vd695\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917107 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917125 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl865\" (UniqueName: \"kubernetes.io/projected/93589e54-9797-4be3-8daf-52530390571d-kube-api-access-zl865\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917145 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917163 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50cf8327-9f32-4674-99ad-8fc015380ac6-proxy-tls\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917206 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bebca40e-c6cf-4932-bd25-fae039fc8607-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917238 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bebca40e-c6cf-4932-bd25-fae039fc8607-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917291 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bebca40e-c6cf-4932-bd25-fae039fc8607-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917322 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss8hh\" (UniqueName: \"kubernetes.io/projected/508a53a9-5d09-4f87-a763-4ddaac552f24-kube-api-access-ss8hh\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917351 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917384 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50cf8327-9f32-4674-99ad-8fc015380ac6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917413 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r25jp\" (UniqueName: \"kubernetes.io/projected/f7787a3d-2716-4923-8b48-9dd09b459af9-kube-api-access-r25jp\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917436 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917453 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r678\" (UniqueName: \"kubernetes.io/projected/50cf8327-9f32-4674-99ad-8fc015380ac6-kube-api-access-2r678\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917473 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917489 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/508a53a9-5d09-4f87-a763-4ddaac552f24-serving-cert\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.917539 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93589e54-9797-4be3-8daf-52530390571d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"
Jan 06 08:17:24 crc kubenswrapper[4784]: E0106 08:17:24.920671 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.42065233 +0000 UTC m=+147.466825257 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.923567 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.929418 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"
Jan 06 08:17:24 crc kubenswrapper[4784]: I0106 08:17:24.943940 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jprht"
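The dense run of reconciler_common.go lines above is the kubelet volume manager reconciling its desired state of world (volumes the scheduled pods need) against its actual state of world (volumes currently attached and mounted): VerifyControllerAttachedVolume confirms a volume is attached to the node, "MountVolume started" marks the desired-to-actual transition, and operation_generator.go's "MountVolume.SetUp succeeded" closes the loop. Stripped to its skeleton, the loop just diffs two sets; this toy sketch uses illustrative names, not the kubelet's real types:

// Toy version of the volume manager's reconcile pass: mount what is
// desired but not actual, unmount what is actual but no longer desired.
package main

import "fmt"

type volumeName string

func reconcile(desired, actual map[volumeName]bool, mount func(volumeName) error) {
	for v := range desired {
		if !actual[v] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", v)
			if err := mount(v); err != nil {
				continue // failed ops are retried on a later pass, with backoff
			}
			actual[v] = true
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", v)
			delete(actual, v)
		}
	}
}

This also explains the repeated UnmountVolume/MountVolume pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8: the old pod (8f668bae-...) still holds the mount in the actual state while the new image-registry pod (90ed9938-...) wants it in the desired state, and both operations stall on the unregistered driver.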
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019616 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019763 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcp29\" (UniqueName: \"kubernetes.io/projected/556124eb-b442-4918-a98f-c40a2adf4178-kube-api-access-fcp29\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019795 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-service-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019831 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srw6m\" (UniqueName: \"kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019850 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/951edc85-0103-4d00-b5b7-960e4c3a40e9-config-volume\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019906 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-config\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019923 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-certs\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.019939 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gvw4\" (UniqueName: \"kubernetes.io/projected/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-kube-api-access-9gvw4\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc 
kubenswrapper[4784]: I0106 08:17:25.019970 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm4vh\" (UniqueName: \"kubernetes.io/projected/61415408-e654-4546-b523-1ce57ecef357-kube-api-access-zm4vh\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020017 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bcdfd6e9-1825-44a3-9255-733080dd11d9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020034 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-node-bootstrap-token\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020064 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020081 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnhcg\" (UniqueName: \"kubernetes.io/projected/d812b40c-8f10-414b-9e2b-43a25c2f4430-kube-api-access-mnhcg\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020096 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd695\" (UniqueName: \"kubernetes.io/projected/9fa124c5-65e7-4744-867f-734ef10ca7c2-kube-api-access-vd695\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020128 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-socket-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020152 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020168 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl865\" (UniqueName: \"kubernetes.io/projected/93589e54-9797-4be3-8daf-52530390571d-kube-api-access-zl865\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020203 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fg9z\" (UniqueName: \"kubernetes.io/projected/951edc85-0103-4d00-b5b7-960e4c3a40e9-kube-api-access-7fg9z\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020218 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-key\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020236 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020271 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50cf8327-9f32-4674-99ad-8fc015380ac6-proxy-tls\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020287 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-registration-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020348 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pgfs\" (UniqueName: \"kubernetes.io/projected/bcdfd6e9-1825-44a3-9255-733080dd11d9-kube-api-access-9pgfs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020421 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-mountpoint-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020488 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/bebca40e-c6cf-4932-bd25-fae039fc8607-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020523 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bebca40e-c6cf-4932-bd25-fae039fc8607-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020566 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-csi-data-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020582 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbxsf\" (UniqueName: \"kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020596 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/951edc85-0103-4d00-b5b7-960e4c3a40e9-metrics-tls\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020621 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4m42\" (UniqueName: \"kubernetes.io/projected/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-kube-api-access-h4m42\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020635 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020655 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bebca40e-c6cf-4932-bd25-fae039fc8607-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020724 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-cabundle\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020749 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss8hh\" (UniqueName: \"kubernetes.io/projected/508a53a9-5d09-4f87-a763-4ddaac552f24-kube-api-access-ss8hh\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020792 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020816 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50cf8327-9f32-4674-99ad-8fc015380ac6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020841 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-cert\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020883 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r25jp\" (UniqueName: \"kubernetes.io/projected/f7787a3d-2716-4923-8b48-9dd09b459af9-kube-api-access-r25jp\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020904 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r678\" (UniqueName: \"kubernetes.io/projected/50cf8327-9f32-4674-99ad-8fc015380ac6-kube-api-access-2r678\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020930 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/556124eb-b442-4918-a98f-c40a2adf4178-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020948 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-plugins-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020975 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.020991 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/508a53a9-5d09-4f87-a763-4ddaac552f24-serving-cert\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021016 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021053 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93589e54-9797-4be3-8daf-52530390571d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021119 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpxbn\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-kube-api-access-zpxbn\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021136 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021153 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93589e54-9797-4be3-8daf-52530390571d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021199 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/508a53a9-5d09-4f87-a763-4ddaac552f24-config\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021224 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fa124c5-65e7-4744-867f-734ef10ca7c2-serving-cert\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021241 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgh72\" (UniqueName: \"kubernetes.io/projected/f114d139-4120-49ed-a3a2-bf85c2cb3a84-kube-api-access-bgh72\") pod \"downloads-7954f5f757-fxbll\" (UID: \"f114d139-4120-49ed-a3a2-bf85c2cb3a84\") " pod="openshift-console/downloads-7954f5f757-fxbll" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.021257 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7787a3d-2716-4923-8b48-9dd09b459af9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.023990 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.523974146 +0000 UTC m=+147.570146983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.024847 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-service-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.048535 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.052675 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7787a3d-2716-4923-8b48-9dd09b459af9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.052924 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.069503 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.070874 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/50cf8327-9f32-4674-99ad-8fc015380ac6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.072520 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508a53a9-5d09-4f87-a763-4ddaac552f24-config\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.072863 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.083995 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fa124c5-65e7-4744-867f-734ef10ca7c2-config\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.085143 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fa124c5-65e7-4744-867f-734ef10ca7c2-serving-cert\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.086199 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93589e54-9797-4be3-8daf-52530390571d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.097185 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bebca40e-c6cf-4932-bd25-fae039fc8607-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.106904 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93589e54-9797-4be3-8daf-52530390571d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.107145 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/50cf8327-9f32-4674-99ad-8fc015380ac6-proxy-tls\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.107974 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bebca40e-c6cf-4932-bd25-fae039fc8607-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.108373 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.108670 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/508a53a9-5d09-4f87-a763-4ddaac552f24-serving-cert\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.113068 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr79s"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.113497 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bebca40e-c6cf-4932-bd25-fae039fc8607-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lmk7x\" (UID: \"bebca40e-c6cf-4932-bd25-fae039fc8607\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.120295 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8c87n" event={"ID":"b1d67235-5081-4c66-acaa-0620c30e170e","Type":"ContainerStarted","Data":"5e548fe2c1ba42bbaa9557474d8a0c99d78a64911a124a875cd86131ee403e14"} Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.120403 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl865\" (UniqueName: \"kubernetes.io/projected/93589e54-9797-4be3-8daf-52530390571d-kube-api-access-zl865\") pod \"openshift-controller-manager-operator-756b6f6bc6-m52dx\" (UID: \"93589e54-9797-4be3-8daf-52530390571d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.122861 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.122928 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcp29\" (UniqueName: \"kubernetes.io/projected/556124eb-b442-4918-a98f-c40a2adf4178-kube-api-access-fcp29\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.122951 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srw6m\" (UniqueName: \"kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123008 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/951edc85-0103-4d00-b5b7-960e4c3a40e9-config-volume\") pod \"dns-default-jcbhk\" (UID: 
\"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123030 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-certs\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123078 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gvw4\" (UniqueName: \"kubernetes.io/projected/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-kube-api-access-9gvw4\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123095 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm4vh\" (UniqueName: \"kubernetes.io/projected/61415408-e654-4546-b523-1ce57ecef357-kube-api-access-zm4vh\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123115 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bcdfd6e9-1825-44a3-9255-733080dd11d9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123157 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-node-bootstrap-token\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123178 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123194 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnhcg\" (UniqueName: \"kubernetes.io/projected/d812b40c-8f10-414b-9e2b-43a25c2f4430-kube-api-access-mnhcg\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123249 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-socket-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.123269 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fg9z\" (UniqueName: 
\"kubernetes.io/projected/951edc85-0103-4d00-b5b7-960e4c3a40e9-kube-api-access-7fg9z\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124585 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" event={"ID":"156ba40d-dabf-4f2e-95ce-af4b456b78ac","Type":"ContainerStarted","Data":"1e4d966ecba017e87505267a5c9f17ff26128d5a0bdb5b24e29e5f24dac24b34"} Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124785 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-key\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124815 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-registration-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124835 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pgfs\" (UniqueName: \"kubernetes.io/projected/bcdfd6e9-1825-44a3-9255-733080dd11d9-kube-api-access-9pgfs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124855 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-mountpoint-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124887 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-csi-data-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124901 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbxsf\" (UniqueName: \"kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124915 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/951edc85-0103-4d00-b5b7-960e4c3a40e9-metrics-tls\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124925 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.124932 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4m42\" (UniqueName: \"kubernetes.io/projected/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-kube-api-access-h4m42\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.126515 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nm6wl"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.126611 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-mountpoint-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.126799 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-registration-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.126896 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-csi-data-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.126966 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.127431 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/951edc85-0103-4d00-b5b7-960e4c3a40e9-config-volume\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128052 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-cabundle\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128115 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-cert\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 
08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128159 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128185 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/556124eb-b442-4918-a98f-c40a2adf4178-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128203 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-plugins-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128233 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.128370 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-node-bootstrap-token\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.129461 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-cabundle\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.129515 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.129712 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.629699794 +0000 UTC m=+147.675872631 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.130012 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/61415408-e654-4546-b523-1ce57ecef357-certs\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.130017 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-plugins-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.130447 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: \"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.130866 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bcdfd6e9-1825-44a3-9255-733080dd11d9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.134210 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.137904 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-socket-dir\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.139591 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-cert\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.147301 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpxbn\" (UniqueName: \"kubernetes.io/projected/c6a86933-d0ee-4f96-a9d5-265e4eeceac3-kube-api-access-zpxbn\") pod \"cluster-image-registry-operator-dc59b4c8b-ql8jm\" (UID: 
\"c6a86933-d0ee-4f96-a9d5-265e4eeceac3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.147304 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.164634 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.175157 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgh72\" (UniqueName: \"kubernetes.io/projected/f114d139-4120-49ed-a3a2-bf85c2cb3a84-kube-api-access-bgh72\") pod \"downloads-7954f5f757-fxbll\" (UID: \"f114d139-4120-49ed-a3a2-bf85c2cb3a84\") " pod="openshift-console/downloads-7954f5f757-fxbll" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.175241 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.175297 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mthsj"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.175849 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.179324 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.184576 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd695\" (UniqueName: \"kubernetes.io/projected/9fa124c5-65e7-4744-867f-734ef10ca7c2-kube-api-access-vd695\") pod \"authentication-operator-69f744f599-pl4c4\" (UID: \"9fa124c5-65e7-4744-867f-734ef10ca7c2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.196166 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.214937 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.218362 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.219139 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r25jp\" (UniqueName: \"kubernetes.io/projected/f7787a3d-2716-4923-8b48-9dd09b459af9-kube-api-access-r25jp\") pod \"cluster-samples-operator-665b6dd947-krcbh\" (UID: \"f7787a3d-2716-4923-8b48-9dd09b459af9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.226908 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.232077 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.232363 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.732314044 +0000 UTC m=+147.778486881 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.232603 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.232930 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.732913656 +0000 UTC m=+147.779086493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.235968 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.238719 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r678\" (UniqueName: \"kubernetes.io/projected/50cf8327-9f32-4674-99ad-8fc015380ac6-kube-api-access-2r678\") pod \"machine-config-controller-84d6567774-z9l7r\" (UID: \"50cf8327-9f32-4674-99ad-8fc015380ac6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.251149 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.258392 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gvw4\" (UniqueName: \"kubernetes.io/projected/f554e179-1e70-4afc-b17b-e3fdc0c13fe3-kube-api-access-9gvw4\") pod \"ingress-canary-z56qx\" (UID: \"f554e179-1e70-4afc-b17b-e3fdc0c13fe3\") " pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.278926 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnhcg\" (UniqueName: \"kubernetes.io/projected/d812b40c-8f10-414b-9e2b-43a25c2f4430-kube-api-access-mnhcg\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.299605 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.302282 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm4vh\" (UniqueName: \"kubernetes.io/projected/61415408-e654-4546-b523-1ce57ecef357-kube-api-access-zm4vh\") pod \"machine-config-server-x7h7q\" (UID: \"61415408-e654-4546-b523-1ce57ecef357\") " pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.304329 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l8w5x"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.320109 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srw6m\" (UniqueName: \"kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m\") pod \"marketplace-operator-79b997595-vkt2h\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.333867 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.334076 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-06 08:17:25.834038648 +0000 UTC m=+147.880211505 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.334261 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.334617 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.83460778 +0000 UTC m=+147.880780627 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.356670 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbxsf\" (UniqueName: \"kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf\") pod \"collect-profiles-29461455-dcj5m\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.364513 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.372942 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.376802 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pgfs\" (UniqueName: \"kubernetes.io/projected/bcdfd6e9-1825-44a3-9255-733080dd11d9-kube-api-access-9pgfs\") pod \"multus-admission-controller-857f4d67dd-s84bx\" (UID: \"bcdfd6e9-1825-44a3-9255-733080dd11d9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.405351 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4m42\" (UniqueName: \"kubernetes.io/projected/655f5b8f-c824-4ef7-aa83-19dcfa0fee56-kube-api-access-h4m42\") pod \"csi-hostpathplugin-9qntj\" (UID: \"655f5b8f-c824-4ef7-aa83-19dcfa0fee56\") " pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.410467 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.417503 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-z56qx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.418206 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fg9z\" (UniqueName: \"kubernetes.io/projected/951edc85-0103-4d00-b5b7-960e4c3a40e9-kube-api-access-7fg9z\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.421504 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.421761 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fxbll" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.423850 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-x7h7q" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.435889 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.436090 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.936063075 +0000 UTC m=+147.982235912 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.436131 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.436707 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:25.936698719 +0000 UTC m=+147.982871556 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.471977 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.478934 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.485754 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.529804 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/556124eb-b442-4918-a98f-c40a2adf4178-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.529909 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d812b40c-8f10-414b-9e2b-43a25c2f4430-signing-key\") pod \"service-ca-9c57cc56f-mdssl\" (UID: \"d812b40c-8f10-414b-9e2b-43a25c2f4430\") " pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.533843 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss8hh\" (UniqueName: \"kubernetes.io/projected/508a53a9-5d09-4f87-a763-4ddaac552f24-kube-api-access-ss8hh\") pod \"service-ca-operator-777779d784-s54vm\" (UID: \"508a53a9-5d09-4f87-a763-4ddaac552f24\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.535178 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcp29\" (UniqueName: \"kubernetes.io/projected/556124eb-b442-4918-a98f-c40a2adf4178-kube-api-access-fcp29\") pod \"package-server-manager-789f6589d5-v7nhq\" (UID: \"556124eb-b442-4918-a98f-c40a2adf4178\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.536385 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/951edc85-0103-4d00-b5b7-960e4c3a40e9-metrics-tls\") pod \"dns-default-jcbhk\" (UID: \"951edc85-0103-4d00-b5b7-960e4c3a40e9\") " pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.537779 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.538399 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.038378422 +0000 UTC m=+148.084551259 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.542894 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-97635b1b7ef548ee62e5808bd23ba62891f4375cd39a4abad72f0ae7f50a2295 WatchSource:0}: Error finding container 97635b1b7ef548ee62e5808bd23ba62891f4375cd39a4abad72f0ae7f50a2295: Status 404 returned error can't find the container with id 97635b1b7ef548ee62e5808bd23ba62891f4375cd39a4abad72f0ae7f50a2295 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.567087 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-012bc7e3bcedde5ff07b4d2868b56340e398c9c049d1204426ae0959fed6c058 WatchSource:0}: Error finding container 012bc7e3bcedde5ff07b4d2868b56340e398c9c049d1204426ae0959fed6c058: Status 404 returned error can't find the container with id 012bc7e3bcedde5ff07b4d2868b56340e398c9c049d1204426ae0959fed6c058 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.574174 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75e1968d_8eee_46f2_b737_f33e8e48fbfd.slice/crio-299561880a081a07a535b2bd67a88fc391e5ea623f51e9e75d5e4b59a29456cb WatchSource:0}: Error finding container 299561880a081a07a535b2bd67a88fc391e5ea623f51e9e75d5e4b59a29456cb: Status 404 returned error can't find the container with id 299561880a081a07a535b2bd67a88fc391e5ea623f51e9e75d5e4b59a29456cb Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.581971 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda595e387_19b3_41f5_9330_3320991a6ca7.slice/crio-31495af97a61e5abb2fa5f04852474ed01cc120d89fa186f1c1a6bf91f89fe88 WatchSource:0}: Error finding container 31495af97a61e5abb2fa5f04852474ed01cc120d89fa186f1c1a6bf91f89fe88: Status 404 returned error can't find the container with id 31495af97a61e5abb2fa5f04852474ed01cc120d89fa186f1c1a6bf91f89fe88 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.583277 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded32fa0e_4e6b_4cae_a4ec_5733e7beaac0.slice/crio-b738c5a6d0a1e5c4929f19c8fb7a840bff39e110470b7563c107382ed29654d1 WatchSource:0}: Error finding container b738c5a6d0a1e5c4929f19c8fb7a840bff39e110470b7563c107382ed29654d1: Status 404 returned error can't find the container with id b738c5a6d0a1e5c4929f19c8fb7a840bff39e110470b7563c107382ed29654d1 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.585331 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff8d015b_29e9_47bf_8735_eec268cb7d3a.slice/crio-e502ce19398b8beeff7a6786af782f11808a235033e00353fa6a1cc69bf6138a WatchSource:0}: Error finding container 
e502ce19398b8beeff7a6786af782f11808a235033e00353fa6a1cc69bf6138a: Status 404 returned error can't find the container with id e502ce19398b8beeff7a6786af782f11808a235033e00353fa6a1cc69bf6138a Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.587335 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0c8329e_40b2_4f88_8c03_9405383a057d.slice/crio-99aea63c6f6cad3282cf8935fc6c2aa95c212cc92820cf470bff7adf7a4ad0eb WatchSource:0}: Error finding container 99aea63c6f6cad3282cf8935fc6c2aa95c212cc92820cf470bff7adf7a4ad0eb: Status 404 returned error can't find the container with id 99aea63c6f6cad3282cf8935fc6c2aa95c212cc92820cf470bff7adf7a4ad0eb Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.587700 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d67a630_478b_447f_8c40_6b26cbbcbe5e.slice/crio-c90eee9ea08c368bbb51f5b9c4922ce7b72885417ffe6ad8c40d07840c6b2f67 WatchSource:0}: Error finding container c90eee9ea08c368bbb51f5b9c4922ce7b72885417ffe6ad8c40d07840c6b2f67: Status 404 returned error can't find the container with id c90eee9ea08c368bbb51f5b9c4922ce7b72885417ffe6ad8c40d07840c6b2f67 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.589346 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod967f3bd4_283b_4918_b8d9_f0c810321a3d.slice/crio-f7eb4eeb0a4748ecd84d46fc9b6370c2973d2306183c415c8e2e0cedec753cc4 WatchSource:0}: Error finding container f7eb4eeb0a4748ecd84d46fc9b6370c2973d2306183c415c8e2e0cedec753cc4: Status 404 returned error can't find the container with id f7eb4eeb0a4748ecd84d46fc9b6370c2973d2306183c415c8e2e0cedec753cc4 Jan 06 08:17:25 crc kubenswrapper[4784]: W0106 08:17:25.593616 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3e5f9ba_45cf_41fe_8942_8366faa1ebd1.slice/crio-092c27aac0191f8c225c96695e0e8d6679a6d0e7cffe0e48fca46547fa4b4f3a WatchSource:0}: Error finding container 092c27aac0191f8c225c96695e0e8d6679a6d0e7cffe0e48fca46547fa4b4f3a: Status 404 returned error can't find the container with id 092c27aac0191f8c225c96695e0e8d6679a6d0e7cffe0e48fca46547fa4b4f3a Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.627337 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.640024 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.640348 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.140335746 +0000 UTC m=+148.186508583 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.648619 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.657434 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.690078 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.690133 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.740920 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.741072 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.241048092 +0000 UTC m=+148.287220929 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.741110 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.741485 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.241473649 +0000 UTC m=+148.287646486 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.819730 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.842091 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.842283 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.342256667 +0000 UTC m=+148.388429504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.842453 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.842903 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.342895371 +0000 UTC m=+148.389068208 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.895239 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"] Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.945225 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.945397 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.445371806 +0000 UTC m=+148.491544633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:25 crc kubenswrapper[4784]: I0106 08:17:25.945628 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:25 crc kubenswrapper[4784]: E0106 08:17:25.945928 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.445914157 +0000 UTC m=+148.492086994 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: W0106 08:17:26.030775 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47c8d91e_1aa9_474c_ac9a_c1d4a43b0d97.slice/crio-10891dc501072be4bd8e8c67b9f94a3dfe66d77807a8f56248112a748b9660f6 WatchSource:0}: Error finding container 10891dc501072be4bd8e8c67b9f94a3dfe66d77807a8f56248112a748b9660f6: Status 404 returned error can't find the container with id 10891dc501072be4bd8e8c67b9f94a3dfe66d77807a8f56248112a748b9660f6 Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.046333 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.046682 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.546666744 +0000 UTC m=+148.592839581 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.056875 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.148397 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.148762 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.648745892 +0000 UTC m=+148.694918729 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.176327 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" event={"ID":"0d67a630-478b-447f-8c40-6b26cbbcbe5e","Type":"ContainerStarted","Data":"c90eee9ea08c368bbb51f5b9c4922ce7b72885417ffe6ad8c40d07840c6b2f67"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.178595 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" event={"ID":"75e1968d-8eee-46f2-b737-f33e8e48fbfd","Type":"ContainerStarted","Data":"299561880a081a07a535b2bd67a88fc391e5ea623f51e9e75d5e4b59a29456cb"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.186445 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" event={"ID":"156ba40d-dabf-4f2e-95ce-af4b456b78ac","Type":"ContainerStarted","Data":"531176da2b7f80fb8e9af93913da7986e035a078c8295b4de4bf6c2ad6b3064e"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.192355 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" event={"ID":"86a7348c-24b4-4138-83a9-0587e28e72e4","Type":"ContainerStarted","Data":"49822981b20da786ce18453d8fd73e4d7b4b0369ae67a380ac0a52ee9635e055"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.196306 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"012bc7e3bcedde5ff07b4d2868b56340e398c9c049d1204426ae0959fed6c058"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.201628 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"4ee6a65e754b192dcfa3aaef76576938d3818cbb6fe1ad4026874eb27c678256"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.203760 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8c87n" event={"ID":"b1d67235-5081-4c66-acaa-0620c30e170e","Type":"ContainerStarted","Data":"3688eb03755bb2cd6e8e287eeee42f490e197603a46055dc92cf5c66884e66d4"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.226874 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" event={"ID":"ff8d015b-29e9-47bf-8735-eec268cb7d3a","Type":"ContainerStarted","Data":"e502ce19398b8beeff7a6786af782f11808a235033e00353fa6a1cc69bf6138a"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.229214 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" event={"ID":"722dd671-6776-4731-8055-795772f78c77","Type":"ContainerStarted","Data":"e5afe5c8c33b180b47c331859c38dfc88262c2f2b3eb1560b78e90b40e6a96a1"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 
08:17:26.238271 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" event={"ID":"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef","Type":"ContainerStarted","Data":"63095233c911ab48faa3c03b807c8feeca1accc244ffe3e310befcc843b402a3"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.247917 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" event={"ID":"c0c8329e-40b2-4f88-8c03-9405383a057d","Type":"ContainerStarted","Data":"99aea63c6f6cad3282cf8935fc6c2aa95c212cc92820cf470bff7adf7a4ad0eb"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.248339 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.249437 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.249666 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.749624655 +0000 UTC m=+148.795797492 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.249830 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.251463 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.751449465 +0000 UTC m=+148.797622302 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.288804 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.292434 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9qntj"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.304070 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"97635b1b7ef548ee62e5808bd23ba62891f4375cd39a4abad72f0ae7f50a2295"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.308862 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" event={"ID":"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97","Type":"ContainerStarted","Data":"10891dc501072be4bd8e8c67b9f94a3dfe66d77807a8f56248112a748b9660f6"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.313979 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" event={"ID":"a595e387-19b3-41f5-9330-3320991a6ca7","Type":"ContainerStarted","Data":"31495af97a61e5abb2fa5f04852474ed01cc120d89fa186f1c1a6bf91f89fe88"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.350505 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.350816 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.850789148 +0000 UTC m=+148.896961985 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.350874 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.351216 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.851209215 +0000 UTC m=+148.897382052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.417865 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" event={"ID":"58b87341-53f1-4b38-807b-964e45e69986","Type":"ContainerStarted","Data":"134569ceee786eef2819cab1a02ab05e2acb623f785e81f0fb757ee2e46b9f91"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418214 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" event={"ID":"c0033eba-a627-460c-b782-04628acbadcf","Type":"ContainerStarted","Data":"c48959a35336129407068327e1e0468477522e6412a8b6d12f271aa847e55a40"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418230 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" event={"ID":"2cb40f53-37df-4f44-9bd6-cfb855f08935","Type":"ContainerStarted","Data":"36af246788c36d7084834bf4eaab6f9d79d0145d1b2d364f500b48793e0eba52"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418258 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-q2d7x" event={"ID":"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0","Type":"ContainerStarted","Data":"b738c5a6d0a1e5c4929f19c8fb7a840bff39e110470b7563c107382ed29654d1"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418275 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" event={"ID":"967f3bd4-283b-4918-b8d9-f0c810321a3d","Type":"ContainerStarted","Data":"f7eb4eeb0a4748ecd84d46fc9b6370c2973d2306183c415c8e2e0cedec753cc4"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418295 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" event={"ID":"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1","Type":"ContainerStarted","Data":"092c27aac0191f8c225c96695e0e8d6679a6d0e7cffe0e48fca46547fa4b4f3a"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418308 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-x7h7q" event={"ID":"61415408-e654-4546-b523-1ce57ecef357","Type":"ContainerStarted","Data":"c043543ac9df2d051f02a482c8d1f6cb348bee79a61cb9e9d774562655186e3c"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.418323 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" event={"ID":"e9337b68-6ba6-41fb-8b01-fe8e77a6a051","Type":"ContainerStarted","Data":"0e69d65af53e92dd2338edca387816c5445c1d2f67c20e0a7000a9ca4dff87a6"} Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.451399 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.451651 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.95163522 +0000 UTC m=+148.997808057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.451759 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.452045 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:26.952037555 +0000 UTC m=+148.998210392 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.552822 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.553088 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.053008081 +0000 UTC m=+149.099180918 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.553355 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.553689 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.053673477 +0000 UTC m=+149.099846314 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.654149 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.655134 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.15511701 +0000 UTC m=+149.201289847 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.683968 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.700326 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.716644 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.719743 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x"] Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.757047 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.757408 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.257393407 +0000 UTC m=+149.303566244 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.853221 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:26 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:26 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:26 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.853695 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.858338 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.858773 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.358753148 +0000 UTC m=+149.404925995 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.902562 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-8c87n" podStartSLOduration=125.902510157 podStartE2EDuration="2m5.902510157s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:26.877142654 +0000 UTC m=+148.923315481" watchObservedRunningTime="2026-01-06 08:17:26.902510157 +0000 UTC m=+148.948682994" Jan 06 08:17:26 crc kubenswrapper[4784]: I0106 08:17:26.959755 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:26 crc kubenswrapper[4784]: E0106 08:17:26.960044 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.460032775 +0000 UTC m=+149.506205612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.060770 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.061202 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.561185218 +0000 UTC m=+149.607358055 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.164113 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.164743 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.664730203 +0000 UTC m=+149.710903040 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.270118 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.270656 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.770637799 +0000 UTC m=+149.816810646 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.371132 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.371694 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.871682507 +0000 UTC m=+149.917855344 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.388791 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-x7h7q" event={"ID":"61415408-e654-4546-b523-1ce57ecef357","Type":"ContainerStarted","Data":"ebf581e24a4ccb31bad9cfe807c891aeba7124deb39cdb8ca30708846af929af"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.400210 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s54vm"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.403876 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" event={"ID":"c0c8329e-40b2-4f88-8c03-9405383a057d","Type":"ContainerStarted","Data":"e999981be4b4d17cf3e8568aa7ec0548c16358a35bb0c46a07dcfa6f67d93eec"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.404995 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.411072 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-x7h7q" podStartSLOduration=5.411058529 podStartE2EDuration="5.411058529s" podCreationTimestamp="2026-01-06 08:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.405750605 +0000 UTC m=+149.451923442" watchObservedRunningTime="2026-01-06 08:17:27.411058529 +0000 UTC m=+149.457231366" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.414339 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.417272 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"1b885d16d4f9fd4cc116954c2cd74dbaa03c3fdc45ff6005cc5eabd7ebe00df8"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.417698 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.426637 4784 patch_prober.go:28] interesting pod/console-operator-58897d9998-l8w5x container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.426679 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" podUID="c0c8329e-40b2-4f88-8c03-9405383a057d" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.437254 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" podStartSLOduration=126.437238364 podStartE2EDuration="2m6.437238364s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.427143777 +0000 UTC m=+149.473316614" watchObservedRunningTime="2026-01-06 08:17:27.437238364 +0000 UTC m=+149.483411201" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.442293 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pl4c4"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.442655 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" event={"ID":"c0033eba-a627-460c-b782-04628acbadcf","Type":"ContainerStarted","Data":"4b857d9fd4e63aa1af93cbf61cafd130471a6021e1d9aa0a75c59a654514a34a"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.455950 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.459217 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jprht"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.460889 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.464209 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" event={"ID":"3c94b199-d756-4ba6-8b21-a491a98cc75b","Type":"ContainerStarted","Data":"6ee6cdda625abaaa33df4ceb0e2a363da635998a6b9feb9cdea0a938f5e818e2"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.477062 4784 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.477326 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.977300432 +0000 UTC m=+150.023473319 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.477464 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.477833 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:27.977826113 +0000 UTC m=+150.023998950 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.480127 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mdssl"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.483991 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"fc20858f3287b48f51cfdeffd8a79e545f3e115dd0a4fd07b296704227e7bf6e"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.487245 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.506871 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" event={"ID":"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97","Type":"ContainerStarted","Data":"cd1b1691f2343bdaab939532ded996460d6a9f7233c48e8807f0e912318e08b7"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.507829 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.524729 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" event={"ID":"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1","Type":"ContainerStarted","Data":"3d66d10b387077b8ae77936e6b44e70bf0802fd6207ee7bc9bdd7286642c1220"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.526818 4784 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vkt2h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.526872 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.538773 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.545760 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" event={"ID":"655f5b8f-c824-4ef7-aa83-19dcfa0fee56","Type":"ContainerStarted","Data":"790e3d9d8cdd5657f0b429c217956f50938466acfc731dc00563037b0c7b6a93"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.554687 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87"] Jan 06 08:17:27 crc 
kubenswrapper[4784]: I0106 08:17:27.563091 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-z56qx"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.570721 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" podStartSLOduration=125.570706377 podStartE2EDuration="2m5.570706377s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.562861776 +0000 UTC m=+149.609034613" watchObservedRunningTime="2026-01-06 08:17:27.570706377 +0000 UTC m=+149.616879214" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.572841 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.581148 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.582160 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.082141346 +0000 UTC m=+150.128314183 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.585476 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"5b74adcc5e47a8fea9c97271c94ef67e3c8a3337d4286d91bd184d522175af55"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.601471 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" event={"ID":"ff8d015b-29e9-47bf-8735-eec268cb7d3a","Type":"ContainerStarted","Data":"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.603466 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:27 crc kubenswrapper[4784]: W0106 08:17:27.620138 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fa124c5_65e7_4744_867f_734ef10ca7c2.slice/crio-fb74ad1d3fd88423e6855a8d0491dc0651f1729e5963e6a49b8a40d44b0cfeca WatchSource:0}: Error finding container fb74ad1d3fd88423e6855a8d0491dc0651f1729e5963e6a49b8a40d44b0cfeca: Status 404 returned error can't find the container with id 
fb74ad1d3fd88423e6855a8d0491dc0651f1729e5963e6a49b8a40d44b0cfeca Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.633964 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jcbhk"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.634016 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" event={"ID":"75e1968d-8eee-46f2-b737-f33e8e48fbfd","Type":"ContainerStarted","Data":"d3c20b2ad121f66e6b4cde34cf0f7e2c5331be571033884729cf7d2b22ee9b28"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.647891 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.652651 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fxbll"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.662294 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-s84bx"] Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.669490 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" event={"ID":"f9a7a2ab-be43-46e9-a5a6-21c3100a55ef","Type":"ContainerStarted","Data":"54dee18166d75eef0a76062b2ffc3d053e5b926cce077b3fc9eaaf0436152aff"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.679826 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" podStartSLOduration=126.679808805 podStartE2EDuration="2m6.679808805s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.674439099 +0000 UTC m=+149.720611936" watchObservedRunningTime="2026-01-06 08:17:27.679808805 +0000 UTC m=+149.725981642" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.682474 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.682751 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.182740558 +0000 UTC m=+150.228913395 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.706193 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" event={"ID":"e9337b68-6ba6-41fb-8b01-fe8e77a6a051","Type":"ContainerStarted","Data":"47b47d2a76076bd6535603da02ae2b23aee01ecf819d405ddd9fc98f69b00e00"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.725870 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" event={"ID":"58b87341-53f1-4b38-807b-964e45e69986","Type":"ContainerStarted","Data":"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.726685 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.727195 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:27 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:27 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:27 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.727243 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:27 crc kubenswrapper[4784]: W0106 08:17:27.748093 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf114d139_4120_49ed_a3a2_bf85c2cb3a84.slice/crio-6483bc3675f5f98ebb7accdebb597a84b48f2f70781ebffe8d9e62053ee3e1ed WatchSource:0}: Error finding container 6483bc3675f5f98ebb7accdebb597a84b48f2f70781ebffe8d9e62053ee3e1ed: Status 404 returned error can't find the container with id 6483bc3675f5f98ebb7accdebb597a84b48f2f70781ebffe8d9e62053ee3e1ed Jan 06 08:17:27 crc kubenswrapper[4784]: W0106 08:17:27.749463 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcdfd6e9_1825_44a3_9255_733080dd11d9.slice/crio-d91e6ab5c806cedaa45e9e1aea84b8ff40deade7b3793ead7f9ec9a7a2182a78 WatchSource:0}: Error finding container d91e6ab5c806cedaa45e9e1aea84b8ff40deade7b3793ead7f9ec9a7a2182a78: Status 404 returned error can't find the container with id d91e6ab5c806cedaa45e9e1aea84b8ff40deade7b3793ead7f9ec9a7a2182a78 Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.751919 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" 
event={"ID":"a595e387-19b3-41f5-9330-3320991a6ca7","Type":"ContainerStarted","Data":"3a9c5b41d18a268c0cb3acbbc8fee48b68a501789331cbdf38b0c3296983fd77"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.751969 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" event={"ID":"a595e387-19b3-41f5-9330-3320991a6ca7","Type":"ContainerStarted","Data":"12fe6cdb96abe0e484d7f586e7e2bc6cf5dc835b642c568517f533fb78127703"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.775074 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.777971 4784 generic.go:334] "Generic (PLEG): container finished" podID="722dd671-6776-4731-8055-795772f78c77" containerID="d6a374a0bc8875b75f6622772f81730acd78a79e9dd58c45e00618767f10fe41" exitCode=0 Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.778952 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" event={"ID":"722dd671-6776-4731-8055-795772f78c77","Type":"ContainerDied","Data":"d6a374a0bc8875b75f6622772f81730acd78a79e9dd58c45e00618767f10fe41"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.783027 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.783920 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.283906452 +0000 UTC m=+150.330079289 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.799517 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" event={"ID":"2cb40f53-37df-4f44-9bd6-cfb855f08935","Type":"ContainerStarted","Data":"46c75271bcc3bf0a234668cb9ce5c50caad6549161ede72f34cb6cb6887fc2fe"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.816370 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqglh" podStartSLOduration=126.816354348 podStartE2EDuration="2m6.816354348s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.698716802 +0000 UTC m=+149.744889639" watchObservedRunningTime="2026-01-06 08:17:27.816354348 +0000 UTC m=+149.862527185" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.821926 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" event={"ID":"480d6bb7-ba40-4925-bdab-c536df24feb3","Type":"ContainerStarted","Data":"a0b696ba2e23645553153afc89613c06021935bb7e8c3f581217ad39a856d040"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.822958 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.833730 4784 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6bjr5 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.833814 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" podUID="480d6bb7-ba40-4925-bdab-c536df24feb3" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.833748 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" event={"ID":"bebca40e-c6cf-4932-bd25-fae039fc8607","Type":"ContainerStarted","Data":"f203e3585948d1f3e2caf053c7616c428461db6dea33958a7211c26b11fbe600"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.855483 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" event={"ID":"967f3bd4-283b-4918-b8d9-f0c810321a3d","Type":"ContainerStarted","Data":"14e2020e0ca6c68aeda2d2d12218e1ff9820ea62b8134025a5e069669ec109ff"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.856244 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.884901 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" event={"ID":"d9a30f78-3dc1-4289-9560-c385125df7de","Type":"ContainerStarted","Data":"4c7d427ff480ee3301b1d4cc5bea67a4622ea241f831a862ade55e956ebfcbcd"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.884946 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" event={"ID":"d9a30f78-3dc1-4289-9560-c385125df7de","Type":"ContainerStarted","Data":"d7bafa6e19cdf863dfe5bc79ea1d8b01912ec62188c5530757293dfb34045c16"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.886155 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.886422 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.386409757 +0000 UTC m=+150.432582594 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.886761 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.936665 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" event={"ID":"c6a86933-d0ee-4f96-a9d5-265e4eeceac3","Type":"ContainerStarted","Data":"dfc8b8d2a9c34d94a16ebf069d4fbafb836d679cb0c3f7c548b8fffd73dc43da"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.954078 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nb4n9" podStartSLOduration=126.954062323 podStartE2EDuration="2m6.954062323s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.90135107 +0000 UTC m=+149.947523907" watchObservedRunningTime="2026-01-06 08:17:27.954062323 +0000 UTC m=+150.000235160" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.954911 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" podStartSLOduration=125.954905236 podStartE2EDuration="2m5.954905236s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.953795743 +0000 UTC m=+149.999968580" watchObservedRunningTime="2026-01-06 08:17:27.954905236 +0000 UTC m=+150.001078073" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.967521 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" event={"ID":"8d0f11e4-cf5f-414f-ab5c-71c303b6774c","Type":"ContainerStarted","Data":"e7413dabbe427464ca31d45316401d8d1df88787c131a29dfedf29b7876a980e"} Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.986319 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r2xv6" podStartSLOduration=125.986294591 podStartE2EDuration="2m5.986294591s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:27.982705153 +0000 UTC m=+150.028877990" watchObservedRunningTime="2026-01-06 08:17:27.986294591 +0000 UTC m=+150.032467428" Jan 06 08:17:27 crc kubenswrapper[4784]: I0106 08:17:27.996440 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:27 crc kubenswrapper[4784]: E0106 08:17:27.997666 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.497651507 +0000 UTC m=+150.543824344 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.016872 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qpchx" podStartSLOduration=127.016857284 podStartE2EDuration="2m7.016857284s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.015624767 +0000 UTC m=+150.061797594" watchObservedRunningTime="2026-01-06 08:17:28.016857284 +0000 UTC m=+150.063030121" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.098060 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.099936 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.599924623 +0000 UTC m=+150.646097460 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.123600 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" podStartSLOduration=126.122758509 podStartE2EDuration="2m6.122758509s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.082108869 +0000 UTC m=+150.128281706" watchObservedRunningTime="2026-01-06 08:17:28.122758509 +0000 UTC m=+150.168931346" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.199723 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.699700023 +0000 UTC m=+150.745872860 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.199768 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.200083 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.200479 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.700467532 +0000 UTC m=+150.746640369 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.226779 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" podStartSLOduration=127.226763332 podStartE2EDuration="2m7.226763332s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.192821949 +0000 UTC m=+150.238994786" watchObservedRunningTime="2026-01-06 08:17:28.226763332 +0000 UTC m=+150.272936169" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.266375 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-hkq78" podStartSLOduration=126.266352812 podStartE2EDuration="2m6.266352812s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.23401661 +0000 UTC m=+150.280189447" watchObservedRunningTime="2026-01-06 08:17:28.266352812 +0000 UTC m=+150.312525649" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.295678 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" 
podStartSLOduration=127.295662016 podStartE2EDuration="2m7.295662016s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.293872408 +0000 UTC m=+150.340045275" watchObservedRunningTime="2026-01-06 08:17:28.295662016 +0000 UTC m=+150.341834853" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.299004 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" podStartSLOduration=127.298992794 podStartE2EDuration="2m7.298992794s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:28.268084058 +0000 UTC m=+150.314256895" watchObservedRunningTime="2026-01-06 08:17:28.298992794 +0000 UTC m=+150.345165631" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.302825 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.303077 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.803049941 +0000 UTC m=+150.849222778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.303246 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.303788 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.803771298 +0000 UTC m=+150.849944145 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.413401 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.413682 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:28.913667056 +0000 UTC m=+150.959839893 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.515260 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.515700 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.015684333 +0000 UTC m=+151.061857170 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.618788 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.619127 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.119113183 +0000 UTC m=+151.165286020 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.720181 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.720911 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.22089875 +0000 UTC m=+151.267071587 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.731332 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:28 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:28 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:28 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.731391 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.825282 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.825491 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.325478575 +0000 UTC m=+151.371651412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:28 crc kubenswrapper[4784]: I0106 08:17:28.932353 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:28 crc kubenswrapper[4784]: E0106 08:17:28.948149 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.448131473 +0000 UTC m=+151.494304310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.012841 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-q2d7x" event={"ID":"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0","Type":"ContainerStarted","Data":"d1599b6f3347f59f8c03e2058e79726d968026fe2648b65de6526d3eac3ca88f"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.034728 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lmk7x" event={"ID":"bebca40e-c6cf-4932-bd25-fae039fc8607","Type":"ContainerStarted","Data":"4abaa920fb4e3d6b4ff62c652c5aba1799232926a77efd0274382aac2572bde2"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.036066 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.036492 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.536468495 +0000 UTC m=+151.582641332 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.077694 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcbhk" event={"ID":"951edc85-0103-4d00-b5b7-960e4c3a40e9","Type":"ContainerStarted","Data":"66eb437fcb1ffb3c63021094c4a2da0d6a8f353afb4f85ae1231869931391863"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.132476 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" event={"ID":"c0033eba-a627-460c-b782-04628acbadcf","Type":"ContainerStarted","Data":"b3b17016ae551acb2901d0d88dad726e97343473388b3e09e849f218f84afbd2"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.139091 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.140510 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.640484528 +0000 UTC m=+151.686657365 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.150080 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" event={"ID":"8d0f11e4-cf5f-414f-ab5c-71c303b6774c","Type":"ContainerStarted","Data":"899cda17276079630624cd1538ddc73bdf2b5c344d788f36b118ea6ebafad9e9"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.242839 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.243830 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.743813924 +0000 UTC m=+151.789986761 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.256762 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" event={"ID":"1c6d40e7-7e34-46bc-9226-307ed8f18a90","Type":"ContainerStarted","Data":"3f34a3bc5d7d771638f2386b3ad5cde5f706ba8e2f5d059dec82d186570f78a3"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.273897 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxbll" event={"ID":"f114d139-4120-49ed-a3a2-bf85c2cb3a84","Type":"ContainerStarted","Data":"6483bc3675f5f98ebb7accdebb597a84b48f2f70781ebffe8d9e62053ee3e1ed"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.284212 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" event={"ID":"e9337b68-6ba6-41fb-8b01-fe8e77a6a051","Type":"ContainerStarted","Data":"3f71c174b1a95068e2860c83b3dba04781f8923dba2d17e658cc1eb56e81ec78"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.305054 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.306153 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.334927 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.347122 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.347576 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.847539106 +0000 UTC m=+151.893711943 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.347990 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" event={"ID":"556124eb-b442-4918-a98f-c40a2adf4178","Type":"ContainerStarted","Data":"17e7ef43fb5836f87f2646d2f2eaf4b3915577a86dc860c43f4c98676bb16153"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.348051 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" event={"ID":"556124eb-b442-4918-a98f-c40a2adf4178","Type":"ContainerStarted","Data":"a59c2891f24c36f7c857f2f507a74bb84eaa6a1946a12547e3c4008ea69f7b69"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.386072 4784 generic.go:334] "Generic (PLEG): container finished" podID="2cb40f53-37df-4f44-9bd6-cfb855f08935" containerID="46c75271bcc3bf0a234668cb9ce5c50caad6549161ede72f34cb6cb6887fc2fe" exitCode=0 Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.386177 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" event={"ID":"2cb40f53-37df-4f44-9bd6-cfb855f08935","Type":"ContainerDied","Data":"46c75271bcc3bf0a234668cb9ce5c50caad6549161ede72f34cb6cb6887fc2fe"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.388395 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-q2d7x" podStartSLOduration=128.388380863 podStartE2EDuration="2m8.388380863s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.369003359 +0000 UTC m=+151.415176206" watchObservedRunningTime="2026-01-06 08:17:29.388380863 +0000 UTC m=+151.434553700" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.390672 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.407901 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" event={"ID":"f7787a3d-2716-4923-8b48-9dd09b459af9","Type":"ContainerStarted","Data":"5ce342d1eabd9690c6541492bc9752915e5600ce36ffec21830e2e410e9cfd82"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.433258 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" event={"ID":"d812b40c-8f10-414b-9e2b-43a25c2f4430","Type":"ContainerStarted","Data":"67d3479a16f9dfca45fc5a8cc519929998b2789a8a892d2eb16c9390697a75b4"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.433300 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" event={"ID":"d812b40c-8f10-414b-9e2b-43a25c2f4430","Type":"ContainerStarted","Data":"353f0e6cc3b6c161431c8c67e0cf07310436f9df5f590192a5fe86a11aeaeca4"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.451308 4784 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.451440 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzd2v\" (UniqueName: \"kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.451580 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.451612 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.451697 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:29.951683043 +0000 UTC m=+151.997855880 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.479502 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" event={"ID":"62ee35e0-2668-4abf-8984-0da411603434","Type":"ContainerStarted","Data":"62b2bf0cf5f6e0169c67820b42d9553df6b10aceee07eaf6b8b5c225c52f33d8"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.482424 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" event={"ID":"62ee35e0-2668-4abf-8984-0da411603434","Type":"ContainerStarted","Data":"867d3525b4772123a4f172e7dcacef4b6525ed2fca31bb9403d88dc87f4ffddf"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.482456 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.514504 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" event={"ID":"50cf8327-9f32-4674-99ad-8fc015380ac6","Type":"ContainerStarted","Data":"41e395db2cb65bf539992194a7e185298f841eede1a590f0d257dec210cf4ecc"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.514574 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" event={"ID":"50cf8327-9f32-4674-99ad-8fc015380ac6","Type":"ContainerStarted","Data":"03706fc14a15f9c0be5018a524ed69d6b485be74bf7aa595039283d77b2d2205"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.515240 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.515669 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5q85" podStartSLOduration=127.515639289 podStartE2EDuration="2m7.515639289s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.506423655 +0000 UTC m=+151.552596492" watchObservedRunningTime="2026-01-06 08:17:29.515639289 +0000 UTC m=+151.561812126" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.516146 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.521424 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.528729 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" event={"ID":"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8","Type":"ContainerStarted","Data":"f2e11c2360a28dfecd59c6c5f8ff33cd01839ac1b1653530ffc08df36b7b9560"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.528765 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" event={"ID":"21ac7bdf-1788-4bc1-b777-5eb6290b4fb8","Type":"ContainerStarted","Data":"381d08e4d50efccb8993cf32b82ff58d188df72146526f9d35f1910bf38f57c0"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.558702 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.568775 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.569136 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" event={"ID":"9fa124c5-65e7-4744-867f-734ef10ca7c2","Type":"ContainerStarted","Data":"a62e1512fea1fab2098fd55c00854e4a093465641535ddce6a9fa55d6a3cb093"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.569168 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" event={"ID":"9fa124c5-65e7-4744-867f-734ef10ca7c2","Type":"ContainerStarted","Data":"fb74ad1d3fd88423e6855a8d0491dc0651f1729e5963e6a49b8a40d44b0cfeca"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.581325 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.581400 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.581468 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzd2v\" (UniqueName: \"kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.581507 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.582666 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.605417 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-z2j87" podStartSLOduration=127.605385974 podStartE2EDuration="2m7.605385974s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.594749545 +0000 UTC m=+151.640922392" watchObservedRunningTime="2026-01-06 08:17:29.605385974 +0000 UTC m=+151.651558811" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.614507 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.615024 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.115007533 +0000 UTC m=+152.161180370 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.619303 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" event={"ID":"93589e54-9797-4be3-8daf-52530390571d","Type":"ContainerStarted","Data":"38d26763b5c5929cc541eff45e1e5eb2e239656f9b7924ae097f6e8a27373584"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.634906 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" event={"ID":"655f5b8f-c824-4ef7-aa83-19dcfa0fee56","Type":"ContainerStarted","Data":"746ab48acd77c4c42c452bf923009e4c3276d3debba59e4a133d1f005c89c647"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.670095 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-mdssl" podStartSLOduration=127.670075756 podStartE2EDuration="2m7.670075756s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.653021113 +0000 UTC m=+151.699193960" watchObservedRunningTime="2026-01-06 08:17:29.670075756 +0000 UTC m=+151.716248593" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.670410 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.686648 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" podStartSLOduration=128.686630873 podStartE2EDuration="2m8.686630873s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.68604015 +0000 UTC m=+151.732212997" watchObservedRunningTime="2026-01-06 08:17:29.686630873 +0000 UTC m=+151.732803710" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.697821 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.698527 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.698834 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7dlj\" (UniqueName: 
\"kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.698879 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.701258 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.201221942 +0000 UTC m=+152.247394779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.734165 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzd2v\" (UniqueName: \"kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v\") pod \"community-operators-8l2cs\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.750742 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" event={"ID":"508a53a9-5d09-4f87-a763-4ddaac552f24","Type":"ContainerStarted","Data":"48327b08b7c8423b9e3809f23678cf2f3a964aa20d5cd5fd7c50216542424825"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.750816 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" event={"ID":"508a53a9-5d09-4f87-a763-4ddaac552f24","Type":"ContainerStarted","Data":"ae1c20ed0526350870609a3848fb2a09f691df8f5793c3da14d1874753c77f25"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.750835 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.751220 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.860492 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7dlj\" (UniqueName: \"kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.860576 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.860634 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.860743 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.869624 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" event={"ID":"156ba40d-dabf-4f2e-95ce-af4b456b78ac","Type":"ContainerStarted","Data":"3ea5da30fd1e998e778eb76242b4904e4ee107640518c4ddb002afade07c6fe5"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.870777 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:29 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:29 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:29 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.870832 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.872851 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.37283222 +0000 UTC m=+152.419005057 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.873621 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.873774 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.883629 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qn96z" podStartSLOduration=128.883601793 podStartE2EDuration="2m8.883601793s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.726925859 +0000 UTC m=+151.773098696" watchObservedRunningTime="2026-01-06 08:17:29.883601793 +0000 UTC m=+151.929774630" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.894929 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" event={"ID":"86a7348c-24b4-4138-83a9-0587e28e72e4","Type":"ContainerStarted","Data":"b221fcd4b37b92e1e240a994e989bf8480099df131253fc56f5d95560d0da85b"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.896098 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.904512 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.907640 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.909926 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.929732 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7dlj\" (UniqueName: \"kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj\") pod \"certified-operators-jgfwh\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.933850 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" event={"ID":"480d6bb7-ba40-4925-bdab-c536df24feb3","Type":"ContainerStarted","Data":"c5637cec5a9103648091b5c3ec74e3d9a654f1f3b86904ce96dee13e9d66e292"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.934773 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.935498 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-pl4c4" podStartSLOduration=128.935472545 podStartE2EDuration="2m8.935472545s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.920143446 +0000 UTC m=+151.966316293" watchObservedRunningTime="2026-01-06 08:17:29.935472545 +0000 UTC m=+151.981645382" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.937814 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.957614 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.958209 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-z56qx" event={"ID":"f554e179-1e70-4afc-b17b-e3fdc0c13fe3","Type":"ContainerStarted","Data":"1492ca5b6a28b472edf3dacb2d1035092da7a6bdcd4e90dd616b1213b6064f85"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.958264 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-z56qx" event={"ID":"f554e179-1e70-4afc-b17b-e3fdc0c13fe3","Type":"ContainerStarted","Data":"27a210c5ca56177ec2f3f909ecfde81f3c57940c9b917d6bef4ec59d5745c47e"} Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.961967 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.962246 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.962329 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drmrx\" (UniqueName: \"kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:29 crc kubenswrapper[4784]: I0106 08:17:29.962394 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:29 crc kubenswrapper[4784]: E0106 08:17:29.962616 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.462537544 +0000 UTC m=+152.508710381 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.036398 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s54vm" podStartSLOduration=128.036343087 podStartE2EDuration="2m8.036343087s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:29.996790889 +0000 UTC m=+152.042963746" watchObservedRunningTime="2026-01-06 08:17:30.036343087 +0000 UTC m=+152.082515924" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.069921 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ql8jm" event={"ID":"c6a86933-d0ee-4f96-a9d5-265e4eeceac3","Type":"ContainerStarted","Data":"dac938993c02e31ce350d4caf3cf2692028a024ed06e38725d48ce5f7503344d"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070238 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070275 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070459 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070522 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070585 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drmrx\" (UniqueName: \"kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070664 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmt6f\" (UniqueName: \"kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.070735 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.071013 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.570998797 +0000 UTC m=+152.617171634 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.072445 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.075200 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.152670 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drmrx\" (UniqueName: \"kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx\") pod \"community-operators-l4mh8\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.154185 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" podStartSLOduration=128.1541748 podStartE2EDuration="2m8.1541748s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.09297135 +0000 UTC m=+152.139144187" watchObservedRunningTime="2026-01-06 08:17:30.1541748 +0000 UTC m=+152.200347637" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.156847 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" podStartSLOduration=129.156831452 podStartE2EDuration="2m9.156831452s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.153229294 +0000 UTC m=+152.199402131" watchObservedRunningTime="2026-01-06 08:17:30.156831452 +0000 UTC m=+152.203004289" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.162604 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" event={"ID":"3c94b199-d756-4ba6-8b21-a491a98cc75b","Type":"ContainerStarted","Data":"a12970997dffd6de3aad0b47fa1fb0e15df9cb4308cd5aaa2387d0cf663d3477"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.162650 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" event={"ID":"3c94b199-d756-4ba6-8b21-a491a98cc75b","Type":"ContainerStarted","Data":"773ad677329431b2389847338aa6095cdf3b0350f6aac286ce7d5d59400fb4c5"} Jan 06 08:17:30 crc 
kubenswrapper[4784]: I0106 08:17:30.172129 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.172328 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmt6f\" (UniqueName: \"kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.172382 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.172470 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.172905 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.172968 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.672953071 +0000 UTC m=+152.719125908 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.174119 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.193328 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pcmvk" podStartSLOduration=129.193297042 podStartE2EDuration="2m9.193297042s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.189843629 +0000 UTC m=+152.236016456" watchObservedRunningTime="2026-01-06 08:17:30.193297042 +0000 UTC m=+152.239469879" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.196302 4784 generic.go:334] "Generic (PLEG): container finished" podID="a3e5f9ba-45cf-41fe-8942-8366faa1ebd1" containerID="3d66d10b387077b8ae77936e6b44e70bf0802fd6207ee7bc9bdd7286642c1220" exitCode=0 Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.196424 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" event={"ID":"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1","Type":"ContainerDied","Data":"3d66d10b387077b8ae77936e6b44e70bf0802fd6207ee7bc9bdd7286642c1220"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.196453 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" event={"ID":"a3e5f9ba-45cf-41fe-8942-8366faa1ebd1","Type":"ContainerStarted","Data":"c09332fa6abfb98c8608c18eb70dbd2daf9c04de48b46dec9b7a08bafc69c79f"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.207395 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.219002 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" event={"ID":"0d67a630-478b-447f-8c40-6b26cbbcbe5e","Type":"ContainerStarted","Data":"f56695daf0f457ac202b9925e184e2615afa49e556acf4f25ace1a04f86a8266"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.228804 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmt6f\" (UniqueName: \"kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f\") pod \"certified-operators-q2b9x\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.274158 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.275905 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.775882862 +0000 UTC m=+152.822055699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.277528 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" event={"ID":"bcdfd6e9-1825-44a3-9255-733080dd11d9","Type":"ContainerStarted","Data":"d91e6ab5c806cedaa45e9e1aea84b8ff40deade7b3793ead7f9ec9a7a2182a78"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.294377 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jprht" event={"ID":"593d3e61-46d7-4a61-baba-4b129ad61754","Type":"ContainerStarted","Data":"196f7b97fe64d3420b082f8674d6bb7198e4a9f017548c6e96ae01e2c3004c9d"} Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.296456 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.297295 4784 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vkt2h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.297331 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.325263 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-j5jb7" podStartSLOduration=129.325233846 podStartE2EDuration="2m9.325233846s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.321613378 +0000 UTC m=+152.367786205" watchObservedRunningTime="2026-01-06 08:17:30.325233846 +0000 UTC m=+152.371406683" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.371517 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" podStartSLOduration=128.371489222 podStartE2EDuration="2m8.371489222s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.370869118 +0000 UTC m=+152.417041965" watchObservedRunningTime="2026-01-06 08:17:30.371489222 +0000 UTC m=+152.417662059" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.376176 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.376389 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.876355959 +0000 UTC m=+152.922528796 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.386918 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.388898 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.888864969 +0000 UTC m=+152.935037806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.395160 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-l8w5x" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.411330 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-z56qx" podStartSLOduration=8.411313841 podStartE2EDuration="8.411313841s" podCreationTimestamp="2026-01-06 08:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.410893645 +0000 UTC m=+152.457066482" watchObservedRunningTime="2026-01-06 08:17:30.411313841 +0000 UTC m=+152.457486678" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.488196 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.489651 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:30.989634408 +0000 UTC m=+153.035807245 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.504702 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" podStartSLOduration=129.504666964 podStartE2EDuration="2m9.504666964s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.499710724 +0000 UTC m=+152.545883561" watchObservedRunningTime="2026-01-06 08:17:30.504666964 +0000 UTC m=+152.550839801" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.602419 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.610099 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.110070391 +0000 UTC m=+153.156243228 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.615257 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6bjr5" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.668013 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mthsj" podStartSLOduration=129.667986874 podStartE2EDuration="2m9.667986874s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:30.666837759 +0000 UTC m=+152.713010596" watchObservedRunningTime="2026-01-06 08:17:30.667986874 +0000 UTC m=+152.714159711" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.730422 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.730882 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.230823546 +0000 UTC m=+153.276996383 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.731342 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.732011 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.231901657 +0000 UTC m=+153.278074494 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.745111 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:30 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:30 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:30 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.745213 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.833280 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.833568 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.333507548 +0000 UTC m=+153.379680395 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.833767 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.834156 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.334147623 +0000 UTC m=+153.380320450 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.935617 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:30 crc kubenswrapper[4784]: E0106 08:17:30.936143 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.436119636 +0000 UTC m=+153.482292473 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:30 crc kubenswrapper[4784]: I0106 08:17:30.995108 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.013455 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-l4z4z" podStartSLOduration=130.013425825 podStartE2EDuration="2m10.013425825s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.013184085 +0000 UTC m=+153.059356922" watchObservedRunningTime="2026-01-06 08:17:31.013425825 +0000 UTC m=+153.059598662" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.042281 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.042899 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.542883275 +0000 UTC m=+153.589056112 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.144210 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.144688 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.644671313 +0000 UTC m=+153.690844150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.229923 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jprht" podStartSLOduration=130.229898514 podStartE2EDuration="2m10.229898514s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.168502227 +0000 UTC m=+153.214675064" watchObservedRunningTime="2026-01-06 08:17:31.229898514 +0000 UTC m=+153.276071351" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.231621 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.247696 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.248010 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.747998779 +0000 UTC m=+153.794171616 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: W0106 08:17:31.264255 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8eba439_a397_4536_8b2e_cde21cfc1384.slice/crio-673f6efab892b18555717542f6022bfeb8e5a59e2021ba00b4a8410017ea8d55 WatchSource:0}: Error finding container 673f6efab892b18555717542f6022bfeb8e5a59e2021ba00b4a8410017ea8d55: Status 404 returned error can't find the container with id 673f6efab892b18555717542f6022bfeb8e5a59e2021ba00b4a8410017ea8d55 Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.311366 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" event={"ID":"556124eb-b442-4918-a98f-c40a2adf4178","Type":"ContainerStarted","Data":"fcf3872de70333ab6d0617f33485f7e9c9bbe3572be5444e5611d6b07a711055"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.312122 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.316487 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcbhk" event={"ID":"951edc85-0103-4d00-b5b7-960e4c3a40e9","Type":"ContainerStarted","Data":"9c30a4d9682c72dfc5b449a6186003e3a17ed938dc185b7fe7c6304ca0222b23"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.316520 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jcbhk" event={"ID":"951edc85-0103-4d00-b5b7-960e4c3a40e9","Type":"ContainerStarted","Data":"0c8d7336517f89299313773aef302564c20af1163a75160e9575f3dfa17a6c06"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.317050 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.318602 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" event={"ID":"655f5b8f-c824-4ef7-aa83-19dcfa0fee56","Type":"ContainerStarted","Data":"a482d49d0a28c1f03570919b26d69a61394aa577ff90cda78b6679f0a18f8bb7"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.335679 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" event={"ID":"722dd671-6776-4731-8055-795772f78c77","Type":"ContainerStarted","Data":"b105887a9554fae0d448380fb0638c3117d07bf5dccb42e8d0c7ee656a6ad1c1"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.335891 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.349293 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.349856 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.849833898 +0000 UTC m=+153.896006735 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.405087 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" event={"ID":"2cb40f53-37df-4f44-9bd6-cfb855f08935","Type":"ContainerStarted","Data":"bdde7a402e4dcd8e4f91e7f3f430e8b5e96aad43ad5b655b3e44fe3b29cb728a"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.405143 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" event={"ID":"2cb40f53-37df-4f44-9bd6-cfb855f08935","Type":"ContainerStarted","Data":"705dec023f5d77a22bf7ffcb3ecdb08438855c76db16bd99aefccb2014f28beb"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.452412 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.454044 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:31.954030988 +0000 UTC m=+154.000203825 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.493626 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" event={"ID":"bcdfd6e9-1825-44a3-9255-733080dd11d9","Type":"ContainerStarted","Data":"eff334c6ff43a23bd9e6d18feba65c18a927162b7624e2b8c7498267ae3f108a"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.493708 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" event={"ID":"bcdfd6e9-1825-44a3-9255-733080dd11d9","Type":"ContainerStarted","Data":"62e3f05355b92b985f86ebad7db8a733e56291e1814e2ed44e57a54bd185ba71"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.533948 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9spjg" event={"ID":"1c6d40e7-7e34-46bc-9226-307ed8f18a90","Type":"ContainerStarted","Data":"00250d60c4b8ecb465a57695d25566066ae8f692968397f2ac1d0c2ae826d299"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.555395 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.556068 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" event={"ID":"f7787a3d-2716-4923-8b48-9dd09b459af9","Type":"ContainerStarted","Data":"814dee6081da3d1d2315cf90fa2e2bde4a6793c6c93869d546e9eb2b8d082715"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.556128 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" event={"ID":"f7787a3d-2716-4923-8b48-9dd09b459af9","Type":"ContainerStarted","Data":"16225126a40095ccf80f5ab130609328cf0a3a3a431d642d046e065e8e9dd263"} Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.556438 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.056420759 +0000 UTC m=+154.102593596 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.575254 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerStarted","Data":"673f6efab892b18555717542f6022bfeb8e5a59e2021ba00b4a8410017ea8d55"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.603038 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxbll" event={"ID":"f114d139-4120-49ed-a3a2-bf85c2cb3a84","Type":"ContainerStarted","Data":"3d09b6217e904dd33b7d70ca07d94a1544fe1abe6a0551c24b721cc31c964998"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.604018 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fxbll" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.615120 4784 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxbll container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.615191 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxbll" podUID="f114d139-4120-49ed-a3a2-bf85c2cb3a84" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.618295 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.621758 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.644532 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-z9l7r" event={"ID":"50cf8327-9f32-4674-99ad-8fc015380ac6","Type":"ContainerStarted","Data":"37e602340b2522591d67f32ded9f56383959a9826b75da931a5d363f2b194ea7"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.659017 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.661266 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.161246612 +0000 UTC m=+154.207419449 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.677164 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m52dx" event={"ID":"93589e54-9797-4be3-8daf-52530390571d","Type":"ContainerStarted","Data":"3fc53d131d46886915ad4e1f2c5a2aff9185e8b8e0e68ca194fcfe140ac52c09"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.699325 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.700724 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.701069 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.753094 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.760286 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:31 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:31 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:31 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.760369 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.767277 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jprht" event={"ID":"593d3e61-46d7-4a61-baba-4b129ad61754","Type":"ContainerStarted","Data":"cb08e072191a7ac71cf68cbbbff2d2fe7a814d2c35cd6d75bc3ad6a7008c321e"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.767331 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jprht" event={"ID":"593d3e61-46d7-4a61-baba-4b129ad61754","Type":"ContainerStarted","Data":"126d148a8a76fbcb78fd51a0ebb190a1fb5d101e3ef309f709fee42e94ea0d54"} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.783949 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq" podStartSLOduration=129.783917661 podStartE2EDuration="2m9.783917661s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.755824873 +0000 UTC 
m=+153.801997710" watchObservedRunningTime="2026-01-06 08:17:31.783917661 +0000 UTC m=+153.830090498" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.794948 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.806021 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p759p\" (UniqueName: \"kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.806189 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.806233 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.810344 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.310301764 +0000 UTC m=+154.356474591 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.819795 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.838871 4784 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.863627 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.864477 4784 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-06T08:17:31.838903962Z","Handler":null,"Name":""} Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.892676 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" podStartSLOduration=130.892646896 podStartE2EDuration="2m10.892646896s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.810567355 +0000 UTC m=+153.856740192" watchObservedRunningTime="2026-01-06 08:17:31.892646896 +0000 UTC m=+153.938819723" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.910601 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.912940 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.914282 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p759p\" (UniqueName: \"kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.914595 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.914653 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.914856 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:31 crc kubenswrapper[4784]: E0106 08:17:31.976114 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.476091439 +0000 UTC m=+154.522264266 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.980127 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.987806 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:31 crc kubenswrapper[4784]: I0106 08:17:31.991348 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.004836 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-fxbll" podStartSLOduration=131.00479656 podStartE2EDuration="2m11.00479656s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.910185728 +0000 UTC m=+153.956358565" watchObservedRunningTime="2026-01-06 08:17:32.00479656 +0000 UTC m=+154.050969397" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.007939 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-s84bx" podStartSLOduration=130.00792675 podStartE2EDuration="2m10.00792675s" podCreationTimestamp="2026-01-06 08:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:31.980683675 +0000 UTC m=+154.026856522" watchObservedRunningTime="2026-01-06 08:17:32.00792675 +0000 UTC m=+154.054099587" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.017447 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.017609 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.017675 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhggz\" 
(UniqueName: \"kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: E0106 08:17:32.017747 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.517719817 +0000 UTC m=+154.563892654 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.017825 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.017938 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:32 crc kubenswrapper[4784]: E0106 08:17:32.018236 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-06 08:17:32.518229736 +0000 UTC m=+154.564402573 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rhgh4" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.030155 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p759p\" (UniqueName: \"kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p\") pod \"redhat-marketplace-ffzbc\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.060922 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" podStartSLOduration=131.060905154 podStartE2EDuration="2m11.060905154s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:32.026762874 +0000 UTC m=+154.072935711" watchObservedRunningTime="2026-01-06 08:17:32.060905154 +0000 UTC m=+154.107077991" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.079622 4784 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.080245 4784 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.100756 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jcbhk" podStartSLOduration=10.100732073 podStartE2EDuration="10.100732073s" podCreationTimestamp="2026-01-06 08:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:32.06236576 +0000 UTC m=+154.108538597" watchObservedRunningTime="2026-01-06 08:17:32.100732073 +0000 UTC m=+154.146904910" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.109450 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-krcbh" podStartSLOduration=131.109433517 podStartE2EDuration="2m11.109433517s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:32.100939481 +0000 UTC m=+154.147112318" watchObservedRunningTime="2026-01-06 08:17:32.109433517 +0000 UTC m=+154.155606354" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.123981 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 
08:17:32.124414 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.124479 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhggz\" (UniqueName: \"kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.124507 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.124922 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.125103 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.134843 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.174712 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.205670 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhggz\" (UniqueName: \"kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz\") pod \"redhat-marketplace-vnsnw\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.229114 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.248677 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.248721 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.323103 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.342631 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.426595 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.427525 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.430666 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.455910 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.533303 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.533344 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.533396 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h46l\" (UniqueName: \"kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.586694 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rhgh4\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") " pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.634370 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.634418 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.634474 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h46l\" (UniqueName: \"kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.635485 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content\") pod 
\"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.635699 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.655927 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h46l\" (UniqueName: \"kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l\") pod \"redhat-operators-wv9d2\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.716265 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.730370 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:32 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:32 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:32 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.730447 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:32 crc kubenswrapper[4784]: W0106 08:17:32.770754 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod971db4c5_67a0_42f5_b71f_042c91d097b5.slice/crio-a94897c75ae571f9b917583df11a8083303c7b95409a762ef0c790818a749433 WatchSource:0}: Error finding container a94897c75ae571f9b917583df11a8083303c7b95409a762ef0c790818a749433: Status 404 returned error can't find the container with id a94897c75ae571f9b917583df11a8083303c7b95409a762ef0c790818a749433 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.800385 4784 generic.go:334] "Generic (PLEG): container finished" podID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerID="e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db" exitCode=0 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.800457 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerDied","Data":"e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.800483 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerStarted","Data":"496b38fe99d47e7afff33267a4d60ec89c7372584f8a77d9da61629b88884530"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.814302 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 
08:17:32.822740 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" event={"ID":"655f5b8f-c824-4ef7-aa83-19dcfa0fee56","Type":"ContainerStarted","Data":"5fe7aff4d8f9ba38a7f7bb2287dec6e8426af9bbc7728971a2e450a13e66311d"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.833567 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerStarted","Data":"a94897c75ae571f9b917583df11a8083303c7b95409a762ef0c790818a749433"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.873795 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.873907 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.875085 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.890202 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.891406 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.895112 4784 generic.go:334] "Generic (PLEG): container finished" podID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerID="b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24" exitCode=0 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.895828 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerDied","Data":"b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.895863 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerStarted","Data":"de11d18dab049eeac8f4b5499ee92b1806efdb29acf783f843ebcdf31bc44cdf"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.898987 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.904477 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6eac71f-1d65-4542-9722-211fee770bba" containerID="cc523b2086978fe5ad2e48d1215e9cbc8b60bdb3aff332ff09aad809a2eade1e" exitCode=0 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.904591 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerDied","Data":"cc523b2086978fe5ad2e48d1215e9cbc8b60bdb3aff332ff09aad809a2eade1e"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.904640 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerStarted","Data":"f1ddc8243b389cf95f5fc722170b444bdfdc8386940aad180160aa8e08661341"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.929909 4784 
generic.go:334] "Generic (PLEG): container finished" podID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerID="67bc8735f68736a0df59a183bd5f1b1dcd6efce20ef78be5582253c5799c46e5" exitCode=0 Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.930798 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerDied","Data":"67bc8735f68736a0df59a183bd5f1b1dcd6efce20ef78be5582253c5799c46e5"} Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.941666 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr79s" Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.953455 4784 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxbll container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 06 08:17:32 crc kubenswrapper[4784]: I0106 08:17:32.956909 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxbll" podUID="f114d139-4120-49ed-a3a2-bf85c2cb3a84" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.039302 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb7tc\" (UniqueName: \"kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.039955 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.040168 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.144276 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.145215 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.145270 4784 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-jb7tc\" (UniqueName: \"kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.145160 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.145742 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.169532 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb7tc\" (UniqueName: \"kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc\") pod \"redhat-operators-vph2c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.211306 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.429107 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:17:33 crc kubenswrapper[4784]: W0106 08:17:33.460655 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77358ffe_6346_4747_9847_27c607f4a2a3.slice/crio-5616c201db1683d96f83b56c6988d487d94d98a9027d7251bd0501db2553fb01 WatchSource:0}: Error finding container 5616c201db1683d96f83b56c6988d487d94d98a9027d7251bd0501db2553fb01: Status 404 returned error can't find the container with id 5616c201db1683d96f83b56c6988d487d94d98a9027d7251bd0501db2553fb01 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.570690 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"] Jan 06 08:17:33 crc kubenswrapper[4784]: W0106 08:17:33.583434 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90ed9938_c2d5_4d7a_9f34_b0e908f9869a.slice/crio-8d5ad624199617e1232c9c848dfca5d46a1101fdeaf85453ef1b91f1883a9e67 WatchSource:0}: Error finding container 8d5ad624199617e1232c9c848dfca5d46a1101fdeaf85453ef1b91f1883a9e67: Status 404 returned error can't find the container with id 8d5ad624199617e1232c9c848dfca5d46a1101fdeaf85453ef1b91f1883a9e67 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.643475 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.720169 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:33 crc 
kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:33 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:33 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.720252 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.943627 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" event={"ID":"655f5b8f-c824-4ef7-aa83-19dcfa0fee56","Type":"ContainerStarted","Data":"5aa38084534ef9c7c24fedd00f4b6fd29e1ceaa231ae4e3eaf3b6a1a0b0684ab"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.947222 4784 generic.go:334] "Generic (PLEG): container finished" podID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerID="eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54" exitCode=0 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.947275 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerDied","Data":"eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.955005 4784 generic.go:334] "Generic (PLEG): container finished" podID="8d0f11e4-cf5f-414f-ab5c-71c303b6774c" containerID="899cda17276079630624cd1538ddc73bdf2b5c344d788f36b118ea6ebafad9e9" exitCode=0 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.955874 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" event={"ID":"8d0f11e4-cf5f-414f-ab5c-71c303b6774c","Type":"ContainerDied","Data":"899cda17276079630624cd1538ddc73bdf2b5c344d788f36b118ea6ebafad9e9"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.961154 4784 generic.go:334] "Generic (PLEG): container finished" podID="77358ffe-6346-4747-9847-27c607f4a2a3" containerID="4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3" exitCode=0 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.963359 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerDied","Data":"4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.963450 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerStarted","Data":"5616c201db1683d96f83b56c6988d487d94d98a9027d7251bd0501db2553fb01"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.964288 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9qntj" podStartSLOduration=11.964267109 podStartE2EDuration="11.964267109s" podCreationTimestamp="2026-01-06 08:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:33.962908527 +0000 UTC m=+156.009081364" watchObservedRunningTime="2026-01-06 08:17:33.964267109 +0000 UTC m=+156.010439946" Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.972388 4784 generic.go:334] 
"Generic (PLEG): container finished" podID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerID="76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320" exitCode=0 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.972483 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerDied","Data":"76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.972551 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerStarted","Data":"ac636755247a7edcb4827798a9708c08334355dae9a1817bb8fd0cf7f876d2d3"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.975881 4784 generic.go:334] "Generic (PLEG): container finished" podID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerID="caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281" exitCode=0 Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.976022 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerDied","Data":"caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.976066 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerStarted","Data":"3db91bfdf81c84231c0166e23d8eb13798bbb8068dd5b19b132db959258d025b"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.989472 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" event={"ID":"90ed9938-c2d5-4d7a-9f34-b0e908f9869a","Type":"ContainerStarted","Data":"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.989582 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" event={"ID":"90ed9938-c2d5-4d7a-9f34-b0e908f9869a","Type":"ContainerStarted","Data":"8d5ad624199617e1232c9c848dfca5d46a1101fdeaf85453ef1b91f1883a9e67"} Jan 06 08:17:33 crc kubenswrapper[4784]: I0106 08:17:33.990018 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.093797 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" podStartSLOduration=133.0937701 podStartE2EDuration="2m13.0937701s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:17:34.091318956 +0000 UTC m=+156.137491973" watchObservedRunningTime="2026-01-06 08:17:34.0937701 +0000 UTC m=+156.139942937" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.370461 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.371189 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:34 crc 
kubenswrapper[4784]: I0106 08:17:34.387155 4784 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nm6wl container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]log ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]etcd ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/generic-apiserver-start-informers ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/max-in-flight-filter ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 06 08:17:34 crc kubenswrapper[4784]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 06 08:17:34 crc kubenswrapper[4784]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/project.openshift.io-projectcache ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-startinformers ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 06 08:17:34 crc kubenswrapper[4784]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 06 08:17:34 crc kubenswrapper[4784]: livez check failed Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.387207 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" podUID="2cb40f53-37df-4f44-9bd6-cfb855f08935" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.715065 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.718755 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:34 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:34 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:34 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.718827 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.754683 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.754720 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.756268 4784 patch_prober.go:28] interesting pod/console-f9d7485db-q2d7x container/console namespace/openshift-console: Startup probe 
status=failure output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.756322 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-q2d7x" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.815515 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.815568 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:34 crc kubenswrapper[4784]: I0106 08:17:34.830425 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.016828 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wdqc5" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.431109 4784 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxbll container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.431398 4784 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxbll container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.431443 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxbll" podUID="f114d139-4120-49ed-a3a2-bf85c2cb3a84" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.431400 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxbll" podUID="f114d139-4120-49ed-a3a2-bf85c2cb3a84" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.485818 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.496936 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 06 08:17:35 crc kubenswrapper[4784]: E0106 08:17:35.497238 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d0f11e4-cf5f-414f-ab5c-71c303b6774c" containerName="collect-profiles" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.497256 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d0f11e4-cf5f-414f-ab5c-71c303b6774c" containerName="collect-profiles" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.497372 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d0f11e4-cf5f-414f-ab5c-71c303b6774c" containerName="collect-profiles" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.497809 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.500851 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.501159 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.502686 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.612211 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume\") pod \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.612339 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume\") pod \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.613133 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume" (OuterVolumeSpecName: "config-volume") pod "8d0f11e4-cf5f-414f-ab5c-71c303b6774c" (UID: "8d0f11e4-cf5f-414f-ab5c-71c303b6774c"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.613183 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbxsf\" (UniqueName: \"kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf\") pod \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\" (UID: \"8d0f11e4-cf5f-414f-ab5c-71c303b6774c\") " Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.613601 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.613693 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.613729 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.626678 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf" (OuterVolumeSpecName: "kube-api-access-mbxsf") pod "8d0f11e4-cf5f-414f-ab5c-71c303b6774c" (UID: "8d0f11e4-cf5f-414f-ab5c-71c303b6774c"). InnerVolumeSpecName "kube-api-access-mbxsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.631175 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8d0f11e4-cf5f-414f-ab5c-71c303b6774c" (UID: "8d0f11e4-cf5f-414f-ab5c-71c303b6774c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.714377 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.714484 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.714573 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbxsf\" (UniqueName: \"kubernetes.io/projected/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-kube-api-access-mbxsf\") on node \"crc\" DevicePath \"\"" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.714587 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d0f11e4-cf5f-414f-ab5c-71c303b6774c-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.714581 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.718025 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:35 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:35 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:35 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.718072 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.734430 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:35 crc kubenswrapper[4784]: I0106 08:17:35.823074 4784 util.go:30] "No sandbox for pod can be found. 
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.061778 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m" event={"ID":"8d0f11e4-cf5f-414f-ab5c-71c303b6774c","Type":"ContainerDied","Data":"e7413dabbe427464ca31d45316401d8d1df88787c131a29dfedf29b7876a980e"}
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.061841 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7413dabbe427464ca31d45316401d8d1df88787c131a29dfedf29b7876a980e"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.061799 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.397666 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.399978 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.406448 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.406717 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.411301 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.482575 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 06 08:17:36 crc kubenswrapper[4784]: W0106 08:17:36.521505 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7488414c_136d_40e5_85aa_cdbddc78e045.slice/crio-ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52 WatchSource:0}: Error finding container ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52: Status 404 returned error can't find the container with id ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.531330 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.531409 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.633252 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
(UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.633338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.633432 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.659862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.719850 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:36 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:36 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:36 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.720385 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:36 crc kubenswrapper[4784]: I0106 08:17:36.749058 4784 util.go:30] "No sandbox for pod can be found. 
Jan 06 08:17:37 crc kubenswrapper[4784]: I0106 08:17:37.082709 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7488414c-136d-40e5-85aa-cdbddc78e045","Type":"ContainerStarted","Data":"ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52"}
Jan 06 08:17:37 crc kubenswrapper[4784]: I0106 08:17:37.161989 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 06 08:17:37 crc kubenswrapper[4784]: I0106 08:17:37.719031 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 06 08:17:37 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld
Jan 06 08:17:37 crc kubenswrapper[4784]: [+]process-running ok
Jan 06 08:17:37 crc kubenswrapper[4784]: healthz check failed
Jan 06 08:17:37 crc kubenswrapper[4784]: I0106 08:17:37.719469 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 06 08:17:38 crc kubenswrapper[4784]: I0106 08:17:38.110146 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3","Type":"ContainerStarted","Data":"911de97ad0526262ae202fff32ce587a741f44f4109065915d7309edc020f433"}
Jan 06 08:17:38 crc kubenswrapper[4784]: I0106 08:17:38.113130 4784 generic.go:334] "Generic (PLEG): container finished" podID="7488414c-136d-40e5-85aa-cdbddc78e045" containerID="5d3f57bca79b099ce20981b08a4a20067c17302e72d5229731a1ed6a28502748" exitCode=0
Jan 06 08:17:38 crc kubenswrapper[4784]: I0106 08:17:38.113186 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7488414c-136d-40e5-85aa-cdbddc78e045","Type":"ContainerDied","Data":"5d3f57bca79b099ce20981b08a4a20067c17302e72d5229731a1ed6a28502748"}
Jan 06 08:17:38 crc kubenswrapper[4784]: I0106 08:17:38.718813 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 06 08:17:38 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld
Jan 06 08:17:38 crc kubenswrapper[4784]: [+]process-running ok
Jan 06 08:17:38 crc kubenswrapper[4784]: healthz check failed
Jan 06 08:17:38 crc kubenswrapper[4784]: I0106 08:17:38.719178 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.124716 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" containerID="eddf782361fffe93fe30f21adb05a8343d54f86e3a1b567341f4dd5a10a7cbc6" exitCode=0
Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.124783 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3","Type":"ContainerDied","Data":"eddf782361fffe93fe30f21adb05a8343d54f86e3a1b567341f4dd5a10a7cbc6"}
pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3","Type":"ContainerDied","Data":"eddf782361fffe93fe30f21adb05a8343d54f86e3a1b567341f4dd5a10a7cbc6"} Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.380689 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.390929 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-nm6wl" Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.723907 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:39 crc kubenswrapper[4784]: [-]has-synced failed: reason withheld Jan 06 08:17:39 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:39 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:39 crc kubenswrapper[4784]: I0106 08:17:39.723964 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:40 crc kubenswrapper[4784]: I0106 08:17:40.701624 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jcbhk" Jan 06 08:17:40 crc kubenswrapper[4784]: I0106 08:17:40.730214 4784 patch_prober.go:28] interesting pod/router-default-5444994796-8c87n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 06 08:17:40 crc kubenswrapper[4784]: [+]has-synced ok Jan 06 08:17:40 crc kubenswrapper[4784]: [+]process-running ok Jan 06 08:17:40 crc kubenswrapper[4784]: healthz check failed Jan 06 08:17:40 crc kubenswrapper[4784]: I0106 08:17:40.730273 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8c87n" podUID="b1d67235-5081-4c66-acaa-0620c30e170e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:17:41 crc kubenswrapper[4784]: I0106 08:17:41.717619 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:41 crc kubenswrapper[4784]: I0106 08:17:41.720294 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-8c87n" Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.351165 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.351732 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:17:44 crc 
Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.569798 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e957a369-1cc7-450b-821f-3ee12341caef-metrics-certs\") pod \"network-metrics-daemon-xfktc\" (UID: \"e957a369-1cc7-450b-821f-3ee12341caef\") " pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.818486 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xfktc"
Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.822400 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:44 crc kubenswrapper[4784]: I0106 08:17:44.828004 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-q2d7x"
Jan 06 08:17:45 crc kubenswrapper[4784]: I0106 08:17:45.441094 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-fxbll"
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.424155 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.430909 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.495831 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access\") pod \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") "
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.495948 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access\") pod \"7488414c-136d-40e5-85aa-cdbddc78e045\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") "
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.495969 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir\") pod \"7488414c-136d-40e5-85aa-cdbddc78e045\" (UID: \"7488414c-136d-40e5-85aa-cdbddc78e045\") "
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.495986 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir\") pod \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\" (UID: \"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3\") "
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.496024 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7488414c-136d-40e5-85aa-cdbddc78e045" (UID: "7488414c-136d-40e5-85aa-cdbddc78e045"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.496155 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" (UID: "f4516146-b1cc-4e3c-89e0-518eb0ffb4d3"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.496501 4784 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7488414c-136d-40e5-85aa-cdbddc78e045-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.496516 4784 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.502368 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" (UID: "f4516146-b1cc-4e3c-89e0-518eb0ffb4d3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.516643 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7488414c-136d-40e5-85aa-cdbddc78e045" (UID: "7488414c-136d-40e5-85aa-cdbddc78e045"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.596998 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7488414c-136d-40e5-85aa-cdbddc78e045-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:17:46 crc kubenswrapper[4784]: I0106 08:17:46.597028 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f4516146-b1cc-4e3c-89e0-518eb0ffb4d3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.214768 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7488414c-136d-40e5-85aa-cdbddc78e045","Type":"ContainerDied","Data":"ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52"} Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.215099 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea0f301dae81780566ad8782ece73609c39cbde7ddcedd709d41d974172edb52" Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.214780 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.217083 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f4516146-b1cc-4e3c-89e0-518eb0ffb4d3","Type":"ContainerDied","Data":"911de97ad0526262ae202fff32ce587a741f44f4109065915d7309edc020f433"} Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.217110 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="911de97ad0526262ae202fff32ce587a741f44f4109065915d7309edc020f433" Jan 06 08:17:47 crc kubenswrapper[4784]: I0106 08:17:47.217162 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 08:17:50 crc kubenswrapper[4784]: I0106 08:17:50.932031 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-xfktc"]
Jan 06 08:17:50 crc kubenswrapper[4784]: W0106 08:17:50.941870 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode957a369_1cc7_450b_821f_3ee12341caef.slice/crio-bda5abb4791eeb8c842bef7787bdf552c8e41ca976ca64271ef441a8013ccde0 WatchSource:0}: Error finding container bda5abb4791eeb8c842bef7787bdf552c8e41ca976ca64271ef441a8013ccde0: Status 404 returned error can't find the container with id bda5abb4791eeb8c842bef7787bdf552c8e41ca976ca64271ef441a8013ccde0
Jan 06 08:17:51 crc kubenswrapper[4784]: I0106 08:17:51.245813 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xfktc" event={"ID":"e957a369-1cc7-450b-821f-3ee12341caef","Type":"ContainerStarted","Data":"bda5abb4791eeb8c842bef7787bdf552c8e41ca976ca64271ef441a8013ccde0"}
Jan 06 08:17:52 crc kubenswrapper[4784]: I0106 08:17:52.897758 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:18:04 crc kubenswrapper[4784]: I0106 08:18:04.410210 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 06 08:18:05 crc kubenswrapper[4784]: I0106 08:18:05.686065 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-v7nhq"
Jan 06 08:18:07 crc kubenswrapper[4784]: E0106 08:18:07.402005 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 06 08:18:07 crc kubenswrapper[4784]: E0106 08:18:07.403194 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jhggz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vnsnw_openshift-marketplace(01361f8d-987c-43b9-ab45-ee19576a9b90): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 06 08:18:07 crc kubenswrapper[4784]: E0106 08:18:07.404449 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-vnsnw" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.029748 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.030057 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-drmrx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-l4mh8_openshift-marketplace(f69c40cb-62ee-4bcd-b08a-89e767dcac83): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.031307 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-l4mh8" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.049604 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.049841 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p759p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-ffzbc_openshift-marketplace(971db4c5-67a0-42f5-b71f-042c91d097b5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 06 08:18:10 crc kubenswrapper[4784]: E0106 08:18:10.051064 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-ffzbc" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5"
Jan 06 08:18:10 crc kubenswrapper[4784]: I0106 08:18:10.461414 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xfktc" event={"ID":"e957a369-1cc7-450b-821f-3ee12341caef","Type":"ContainerStarted","Data":"28dd0e74c041830909d27c1116090f92556c6173d48cafa1816016a4a44b95bb"}
Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.199340 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-l4mh8" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83"
Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.199646 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-vnsnw" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90"
Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.201109 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-ffzbc" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5"
Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.288352 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
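After the repeated ErrImagePull failures above, the failing pulls move into ImagePullBackOff: retries are spaced out exponentially up to a cap instead of hammering the registry. A sketch of that policy in Go (the 10s base and 5m cap are assumptions about typical kubelet defaults, not values read from this log):

    package main

    import (
        "fmt"
        "time"
    )

    // pullBackoff returns the wait before retry n of a failing image
    // pull: exponential growth from a base delay, clamped to a maximum.
    func pullBackoff(attempt int) time.Duration {
        const (
            base = 10 * time.Second
            max  = 5 * time.Minute
        )
        d := base << uint(attempt) // 10s, 20s, 40s, ...
        if d > max || d <= 0 {     // clamp, and guard shift overflow
            d = max
        }
        return d
    }

    func main() {
        for n := 0; n < 7; n++ {
            fmt.Println(n, pullBackoff(n))
        }
    }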
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.288517 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l7dlj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jgfwh_openshift-marketplace(b6eac71f-1d65-4542-9722-211fee770bba): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.289884 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-jgfwh" podUID="b6eac71f-1d65-4542-9722-211fee770bba" Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.298696 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.299716 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Jan 06 08:18:12 crc kubenswrapper[4784]: E0106 08:18:12.300899 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-8l2cs" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.195573 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 06 08:18:14 crc kubenswrapper[4784]: E0106 08:18:14.196758 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.196779 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: E0106 08:18:14.196811 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7488414c-136d-40e5-85aa-cdbddc78e045" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.196821 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7488414c-136d-40e5-85aa-cdbddc78e045" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.196970 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4516146-b1cc-4e3c-89e0-518eb0ffb4d3" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.196996 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7488414c-136d-40e5-85aa-cdbddc78e045" containerName="pruner"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.200942 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.207789 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.208012 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.213367 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.351197 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.351274 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.360760 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.360916 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.462478 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.462555 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.462659 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.488082 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
\"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 06 08:18:14 crc kubenswrapper[4784]: I0106 08:18:14.532302 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.532899 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-8l2cs" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.533046 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jgfwh" podUID="b6eac71f-1d65-4542-9722-211fee770bba" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.620048 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.620320 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jb7tc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-vph2c_openshift-marketplace(e179e7e8-0210-4afe-9fe2-cf76289c135c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.621885 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-vph2c" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.628605 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.628809 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7h46l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wv9d2_openshift-marketplace(77358ffe-6346-4747-9847-27c607f4a2a3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.629974 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wv9d2" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.631574 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.631981 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Jan 06 08:18:16 crc kubenswrapper[4784]: E0106 08:18:16.633178 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-q2b9x" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2"
Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.045289 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 06 08:18:17 crc kubenswrapper[4784]: W0106 08:18:17.046446 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod73f452b9_fdbb_4b29_8cbb_12e2fdc1268a.slice/crio-69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19 WatchSource:0}: Error finding container 69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19: Status 404 returned error can't find the container with id 69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19
Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.512733 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a","Type":"ContainerStarted","Data":"167fbda0f711b61cf30be3ba7e5a0e1d0016eb43cfced414c7814608ccbedc24"}
Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.513164 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a","Type":"ContainerStarted","Data":"69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19"}
Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.516266 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xfktc" event={"ID":"e957a369-1cc7-450b-821f-3ee12341caef","Type":"ContainerStarted","Data":"025d206996ef367e2b81afd5c587d8309bb7faf329afac4e9d50d99f4d68ea0e"}
event={"ID":"e957a369-1cc7-450b-821f-3ee12341caef","Type":"ContainerStarted","Data":"025d206996ef367e2b81afd5c587d8309bb7faf329afac4e9d50d99f4d68ea0e"} Jan 06 08:18:17 crc kubenswrapper[4784]: E0106 08:18:17.517134 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wv9d2" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" Jan 06 08:18:17 crc kubenswrapper[4784]: E0106 08:18:17.517360 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-vph2c" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" Jan 06 08:18:17 crc kubenswrapper[4784]: E0106 08:18:17.517619 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-q2b9x" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.533555 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=3.5335088260000003 podStartE2EDuration="3.533508826s" podCreationTimestamp="2026-01-06 08:18:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:18:17.531529416 +0000 UTC m=+199.577702253" watchObservedRunningTime="2026-01-06 08:18:17.533508826 +0000 UTC m=+199.579681663" Jan 06 08:18:17 crc kubenswrapper[4784]: I0106 08:18:17.592051 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-xfktc" podStartSLOduration=176.592023166 podStartE2EDuration="2m56.592023166s" podCreationTimestamp="2026-01-06 08:15:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:18:17.588565133 +0000 UTC m=+199.634737980" watchObservedRunningTime="2026-01-06 08:18:17.592023166 +0000 UTC m=+199.638196003" Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.797489 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.798731 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.803665 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.962322 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.962417 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:18 crc kubenswrapper[4784]: I0106 08:18:18.962466 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.064063 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.064136 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.064177 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.064198 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.064254 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.089464 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.222782 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.529312 4784 generic.go:334] "Generic (PLEG): container finished" podID="73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" containerID="167fbda0f711b61cf30be3ba7e5a0e1d0016eb43cfced414c7814608ccbedc24" exitCode=0 Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.529397 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a","Type":"ContainerDied","Data":"167fbda0f711b61cf30be3ba7e5a0e1d0016eb43cfced414c7814608ccbedc24"} Jan 06 08:18:19 crc kubenswrapper[4784]: I0106 08:18:19.636821 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.538163 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4","Type":"ContainerStarted","Data":"9b0a623f4c512cbf69decdc7ba6f650dd0d4ed837c27988e0d7d47c002e6e68c"} Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.539005 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4","Type":"ContainerStarted","Data":"de854cb6f57c269c6ae3a6500cd835fe85e706645ea57d78923402d57db70b7c"} Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.850026 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.869330 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.869304059 podStartE2EDuration="2.869304059s" podCreationTimestamp="2026-01-06 08:18:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:18:20.56459065 +0000 UTC m=+202.610763487" watchObservedRunningTime="2026-01-06 08:18:20.869304059 +0000 UTC m=+202.915476896" Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.998836 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir\") pod \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.998964 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") pod \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\" (UID: \"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a\") " Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.999054 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" (UID: "73f452b9-fdbb-4b29-8cbb-12e2fdc1268a"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:18:20 crc kubenswrapper[4784]: I0106 08:18:20.999324 4784 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:21 crc kubenswrapper[4784]: I0106 08:18:21.009394 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" (UID: "73f452b9-fdbb-4b29-8cbb-12e2fdc1268a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:21 crc kubenswrapper[4784]: I0106 08:18:21.100610 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73f452b9-fdbb-4b29-8cbb-12e2fdc1268a-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:21 crc kubenswrapper[4784]: I0106 08:18:21.547080 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"73f452b9-fdbb-4b29-8cbb-12e2fdc1268a","Type":"ContainerDied","Data":"69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19"} Jan 06 08:18:21 crc kubenswrapper[4784]: I0106 08:18:21.547162 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69ba30007f1e6dcc83785a2d54f525c5308e5bb9a82cad8a9761099290de2f19" Jan 06 08:18:21 crc kubenswrapper[4784]: I0106 08:18:21.547116 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 06 08:18:23 crc kubenswrapper[4784]: I0106 08:18:23.673130 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:18:24 crc kubenswrapper[4784]: I0106 08:18:24.566349 4784 generic.go:334] "Generic (PLEG): container finished" podID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerID="c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0" exitCode=0 Jan 06 08:18:24 crc kubenswrapper[4784]: I0106 08:18:24.566457 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerDied","Data":"c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0"} Jan 06 08:18:24 crc kubenswrapper[4784]: I0106 08:18:24.570455 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerStarted","Data":"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d"} Jan 06 08:18:25 crc kubenswrapper[4784]: I0106 08:18:25.578286 4784 generic.go:334] "Generic (PLEG): container finished" podID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerID="eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d" exitCode=0 Jan 06 08:18:25 crc kubenswrapper[4784]: I0106 08:18:25.578384 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerDied","Data":"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d"} Jan 06 08:18:25 crc kubenswrapper[4784]: I0106 08:18:25.582382 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerStarted","Data":"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851"} Jan 06 08:18:25 crc kubenswrapper[4784]: I0106 08:18:25.633029 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vnsnw" podStartSLOduration=3.621477016 podStartE2EDuration="54.633005834s" podCreationTimestamp="2026-01-06 08:17:31 +0000 UTC" firstStartedPulling="2026-01-06 08:17:33.982582032 +0000 UTC m=+156.028754869" lastFinishedPulling="2026-01-06 08:18:24.99411085 +0000 UTC m=+207.040283687" observedRunningTime="2026-01-06 08:18:25.627977146 +0000 UTC m=+207.674150003" watchObservedRunningTime="2026-01-06 08:18:25.633005834 +0000 UTC m=+207.679178671" Jan 06 08:18:26 crc kubenswrapper[4784]: I0106 08:18:26.591760 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerStarted","Data":"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2"} Jan 06 08:18:26 crc kubenswrapper[4784]: I0106 08:18:26.621489 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ffzbc" podStartSLOduration=3.560368618 podStartE2EDuration="55.621463931s" podCreationTimestamp="2026-01-06 08:17:31 +0000 UTC" firstStartedPulling="2026-01-06 08:17:33.952105962 +0000 UTC m=+155.998278799" lastFinishedPulling="2026-01-06 08:18:26.013201275 +0000 UTC m=+208.059374112" observedRunningTime="2026-01-06 08:18:26.615662885 +0000 UTC m=+208.661835752" watchObservedRunningTime="2026-01-06 08:18:26.621463931 +0000 UTC m=+208.667636808" Jan 06 08:18:27 crc kubenswrapper[4784]: I0106 08:18:27.599521 4784 generic.go:334] "Generic (PLEG): container finished" podID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerID="0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8" exitCode=0 Jan 06 08:18:27 crc kubenswrapper[4784]: I0106 08:18:27.599596 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerDied","Data":"0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8"} Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.136144 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.136826 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.324367 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.324421 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.371489 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.375185 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 
08:18:32.697724 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:18:32 crc kubenswrapper[4784]: I0106 08:18:32.810340 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:34 crc kubenswrapper[4784]: I0106 08:18:34.646106 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerStarted","Data":"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956"} Jan 06 08:18:34 crc kubenswrapper[4784]: I0106 08:18:34.674966 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l4mh8" podStartSLOduration=5.549076078 podStartE2EDuration="1m5.674943043s" podCreationTimestamp="2026-01-06 08:17:29 +0000 UTC" firstStartedPulling="2026-01-06 08:17:32.952894915 +0000 UTC m=+154.999067752" lastFinishedPulling="2026-01-06 08:18:33.07876187 +0000 UTC m=+215.124934717" observedRunningTime="2026-01-06 08:18:34.670121452 +0000 UTC m=+216.716294329" watchObservedRunningTime="2026-01-06 08:18:34.674943043 +0000 UTC m=+216.721115880" Jan 06 08:18:34 crc kubenswrapper[4784]: I0106 08:18:34.746692 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:18:34 crc kubenswrapper[4784]: I0106 08:18:34.747006 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vnsnw" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="registry-server" containerID="cri-o://8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851" gracePeriod=2 Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.171090 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.343502 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content\") pod \"01361f8d-987c-43b9-ab45-ee19576a9b90\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.343563 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities\") pod \"01361f8d-987c-43b9-ab45-ee19576a9b90\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.343584 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhggz\" (UniqueName: \"kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz\") pod \"01361f8d-987c-43b9-ab45-ee19576a9b90\" (UID: \"01361f8d-987c-43b9-ab45-ee19576a9b90\") " Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.344308 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities" (OuterVolumeSpecName: "utilities") pod "01361f8d-987c-43b9-ab45-ee19576a9b90" (UID: "01361f8d-987c-43b9-ab45-ee19576a9b90"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.344677 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.351843 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz" (OuterVolumeSpecName: "kube-api-access-jhggz") pod "01361f8d-987c-43b9-ab45-ee19576a9b90" (UID: "01361f8d-987c-43b9-ab45-ee19576a9b90"). InnerVolumeSpecName "kube-api-access-jhggz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.374479 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01361f8d-987c-43b9-ab45-ee19576a9b90" (UID: "01361f8d-987c-43b9-ab45-ee19576a9b90"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.445405 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01361f8d-987c-43b9-ab45-ee19576a9b90-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.445451 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhggz\" (UniqueName: \"kubernetes.io/projected/01361f8d-987c-43b9-ab45-ee19576a9b90-kube-api-access-jhggz\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.654198 4784 generic.go:334] "Generic (PLEG): container finished" podID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerID="dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35" exitCode=0 Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.654287 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerDied","Data":"dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.660875 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6eac71f-1d65-4542-9722-211fee770bba" containerID="d7e1e8933b5f35deb377922887e9b4816c5c691360bb5ad81b3b067388d71eb0" exitCode=0 Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.660985 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerDied","Data":"d7e1e8933b5f35deb377922887e9b4816c5c691360bb5ad81b3b067388d71eb0"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.665213 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerStarted","Data":"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.668512 4784 generic.go:334] "Generic (PLEG): container finished" podID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerID="03b86fdc94b556970b8cbcd0e4750d3e0b40be99a375de31781eb1b9de611602" exitCode=0 Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 
08:18:35.668610 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerDied","Data":"03b86fdc94b556970b8cbcd0e4750d3e0b40be99a375de31781eb1b9de611602"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.671981 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerStarted","Data":"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.674941 4784 generic.go:334] "Generic (PLEG): container finished" podID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerID="8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851" exitCode=0 Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.674994 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerDied","Data":"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.675022 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vnsnw" event={"ID":"01361f8d-987c-43b9-ab45-ee19576a9b90","Type":"ContainerDied","Data":"3db91bfdf81c84231c0166e23d8eb13798bbb8068dd5b19b132db959258d025b"} Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.675057 4784 scope.go:117] "RemoveContainer" containerID="8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.675210 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vnsnw" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.703903 4784 scope.go:117] "RemoveContainer" containerID="c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.810945 4784 scope.go:117] "RemoveContainer" containerID="caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.830614 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.831920 4784 scope.go:117] "RemoveContainer" containerID="8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851" Jan 06 08:18:35 crc kubenswrapper[4784]: E0106 08:18:35.832442 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851\": container with ID starting with 8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851 not found: ID does not exist" containerID="8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.832478 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851"} err="failed to get container status \"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851\": rpc error: code = NotFound desc = could not find container \"8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851\": container with ID starting with 8b0ab0213a7005dc2ec66c6fc8d4f007417dbf27ac0f8a12bfb699446cf8f851 not found: ID does not exist" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.832522 4784 scope.go:117] "RemoveContainer" containerID="c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0" Jan 06 08:18:35 crc kubenswrapper[4784]: E0106 08:18:35.832775 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0\": container with ID starting with c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0 not found: ID does not exist" containerID="c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.832793 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0"} err="failed to get container status \"c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0\": rpc error: code = NotFound desc = could not find container \"c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0\": container with ID starting with c3861165c564d45f6be0bf9a74bcfe9999eb97db4b84181ed2d274b31850b9e0 not found: ID does not exist" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.832805 4784 scope.go:117] "RemoveContainer" containerID="caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281" Jan 06 08:18:35 crc kubenswrapper[4784]: E0106 08:18:35.836449 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281\": container with ID starting with 
caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281 not found: ID does not exist" containerID="caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.836478 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281"} err="failed to get container status \"caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281\": rpc error: code = NotFound desc = could not find container \"caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281\": container with ID starting with caad2516700b692964efc883e42a0c31861e632e50144ef9c195906a5b1ab281 not found: ID does not exist" Jan 06 08:18:35 crc kubenswrapper[4784]: I0106 08:18:35.839280 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vnsnw"] Jan 06 08:18:36 crc kubenswrapper[4784]: I0106 08:18:36.392462 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" path="/var/lib/kubelet/pods/01361f8d-987c-43b9-ab45-ee19576a9b90/volumes" Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.700338 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerStarted","Data":"a970ffb935c82d0563173431d519993eaf34e48c059bf132cedab43e885d76cb"} Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.703624 4784 generic.go:334] "Generic (PLEG): container finished" podID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerID="203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb" exitCode=0 Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.703651 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerDied","Data":"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb"} Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.706270 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerStarted","Data":"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9"} Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.719323 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerStarted","Data":"edc01d92310dc24dfe3255462cc87817661581d3d6b76fdab6cd293aade7e070"} Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.723565 4784 generic.go:334] "Generic (PLEG): container finished" podID="77358ffe-6346-4747-9847-27c607f4a2a3" containerID="fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd" exitCode=0 Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.723606 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerDied","Data":"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd"} Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.727684 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8l2cs" podStartSLOduration=5.457774359 
podStartE2EDuration="1m8.727670497s" podCreationTimestamp="2026-01-06 08:17:29 +0000 UTC" firstStartedPulling="2026-01-06 08:17:32.965582502 +0000 UTC m=+155.011755339" lastFinishedPulling="2026-01-06 08:18:36.23547861 +0000 UTC m=+218.281651477" observedRunningTime="2026-01-06 08:18:37.726097071 +0000 UTC m=+219.772269918" watchObservedRunningTime="2026-01-06 08:18:37.727670497 +0000 UTC m=+219.773843334" Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.742956 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jgfwh" podStartSLOduration=5.496670559 podStartE2EDuration="1m8.742938039s" podCreationTimestamp="2026-01-06 08:17:29 +0000 UTC" firstStartedPulling="2026-01-06 08:17:32.954045469 +0000 UTC m=+155.000218306" lastFinishedPulling="2026-01-06 08:18:36.200312949 +0000 UTC m=+218.246485786" observedRunningTime="2026-01-06 08:18:37.740843515 +0000 UTC m=+219.787016362" watchObservedRunningTime="2026-01-06 08:18:37.742938039 +0000 UTC m=+219.789110876" Jan 06 08:18:37 crc kubenswrapper[4784]: I0106 08:18:37.780472 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q2b9x" podStartSLOduration=5.275106146 podStartE2EDuration="1m8.780454093s" podCreationTimestamp="2026-01-06 08:17:29 +0000 UTC" firstStartedPulling="2026-01-06 08:17:32.873376122 +0000 UTC m=+154.919548959" lastFinishedPulling="2026-01-06 08:18:36.378724069 +0000 UTC m=+218.424896906" observedRunningTime="2026-01-06 08:18:37.778954859 +0000 UTC m=+219.825127696" watchObservedRunningTime="2026-01-06 08:18:37.780454093 +0000 UTC m=+219.826626930" Jan 06 08:18:38 crc kubenswrapper[4784]: I0106 08:18:38.835862 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerStarted","Data":"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff"} Jan 06 08:18:38 crc kubenswrapper[4784]: I0106 08:18:38.856752 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vph2c" podStartSLOduration=2.672307096 podStartE2EDuration="1m6.85673076s" podCreationTimestamp="2026-01-06 08:17:32 +0000 UTC" firstStartedPulling="2026-01-06 08:17:33.977011388 +0000 UTC m=+156.023184225" lastFinishedPulling="2026-01-06 08:18:38.161435052 +0000 UTC m=+220.207607889" observedRunningTime="2026-01-06 08:18:38.856273744 +0000 UTC m=+220.902446581" watchObservedRunningTime="2026-01-06 08:18:38.85673076 +0000 UTC m=+220.902903597" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.847756 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerStarted","Data":"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6"} Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.874164 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wv9d2" podStartSLOduration=3.304620507 podStartE2EDuration="1m7.874138496s" podCreationTimestamp="2026-01-06 08:17:32 +0000 UTC" firstStartedPulling="2026-01-06 08:17:33.966298037 +0000 UTC m=+156.012470874" lastFinishedPulling="2026-01-06 08:18:38.535816026 +0000 UTC m=+220.581988863" observedRunningTime="2026-01-06 08:18:39.871527383 +0000 UTC m=+221.917700290" watchObservedRunningTime="2026-01-06 08:18:39.874138496 +0000 
UTC m=+221.920311363" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.906033 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.906557 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.939135 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.939201 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:18:39 crc kubenswrapper[4784]: I0106 08:18:39.975301 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.011947 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.208321 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.208381 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.278584 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.297722 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.298139 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.355371 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:40 crc kubenswrapper[4784]: I0106 08:18:40.902457 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:41 crc kubenswrapper[4784]: I0106 08:18:41.898880 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:18:41 crc kubenswrapper[4784]: I0106 08:18:41.901360 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:42 crc kubenswrapper[4784]: I0106 08:18:42.545783 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:18:42 crc kubenswrapper[4784]: I0106 08:18:42.876732 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:18:42 crc kubenswrapper[4784]: I0106 08:18:42.877628 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.152438 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.152742 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l4mh8" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="registry-server" containerID="cri-o://d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956" gracePeriod=2 Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.213878 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.214354 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.869944 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q2b9x" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="registry-server" containerID="cri-o://8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9" gracePeriod=2 Jan 06 08:18:43 crc kubenswrapper[4784]: I0106 08:18:43.927939 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wv9d2" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="registry-server" probeResult="failure" output=< Jan 06 08:18:43 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 08:18:43 crc kubenswrapper[4784]: > Jan 06 08:18:44 crc kubenswrapper[4784]: I0106 08:18:44.259936 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vph2c" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="registry-server" probeResult="failure" output=< Jan 06 08:18:44 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 08:18:44 crc kubenswrapper[4784]: > Jan 06 08:18:45 crc kubenswrapper[4784]: E0106 08:18:45.689460 4784 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.378s" Jan 06 08:18:45 crc kubenswrapper[4784]: I0106 08:18:45.691239 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:18:45 crc kubenswrapper[4784]: I0106 08:18:45.691328 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:18:45 crc kubenswrapper[4784]: I0106 08:18:45.708245 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:18:45 crc kubenswrapper[4784]: I0106 08:18:45.724219 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness 
probe, will be restarted" Jan 06 08:18:45 crc kubenswrapper[4784]: I0106 08:18:45.725164 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f" gracePeriod=600 Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.091827 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.194432 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content\") pod \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.194586 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities\") pod \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.194615 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmt6f\" (UniqueName: \"kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f\") pod \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\" (UID: \"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.195820 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities" (OuterVolumeSpecName: "utilities") pod "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" (UID: "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.205659 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f" (OuterVolumeSpecName: "kube-api-access-fmt6f") pod "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" (UID: "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2"). InnerVolumeSpecName "kube-api-access-fmt6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.250589 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" (UID: "95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.269693 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.298115 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.298163 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.298174 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmt6f\" (UniqueName: \"kubernetes.io/projected/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2-kube-api-access-fmt6f\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.399591 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities\") pod \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.399671 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content\") pod \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.399813 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drmrx\" (UniqueName: \"kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx\") pod \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\" (UID: \"f69c40cb-62ee-4bcd-b08a-89e767dcac83\") " Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.404276 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities" (OuterVolumeSpecName: "utilities") pod "f69c40cb-62ee-4bcd-b08a-89e767dcac83" (UID: "f69c40cb-62ee-4bcd-b08a-89e767dcac83"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.404293 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx" (OuterVolumeSpecName: "kube-api-access-drmrx") pod "f69c40cb-62ee-4bcd-b08a-89e767dcac83" (UID: "f69c40cb-62ee-4bcd-b08a-89e767dcac83"). InnerVolumeSpecName "kube-api-access-drmrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.459478 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f69c40cb-62ee-4bcd-b08a-89e767dcac83" (UID: "f69c40cb-62ee-4bcd-b08a-89e767dcac83"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.502420 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.502599 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69c40cb-62ee-4bcd-b08a-89e767dcac83-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.502628 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drmrx\" (UniqueName: \"kubernetes.io/projected/f69c40cb-62ee-4bcd-b08a-89e767dcac83-kube-api-access-drmrx\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.732768 4784 generic.go:334] "Generic (PLEG): container finished" podID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerID="d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956" exitCode=0 Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.732880 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4mh8" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.732893 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerDied","Data":"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.733259 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4mh8" event={"ID":"f69c40cb-62ee-4bcd-b08a-89e767dcac83","Type":"ContainerDied","Data":"de11d18dab049eeac8f4b5499ee92b1806efdb29acf783f843ebcdf31bc44cdf"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.733287 4784 scope.go:117] "RemoveContainer" containerID="d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.736389 4784 generic.go:334] "Generic (PLEG): container finished" podID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerID="8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9" exitCode=0 Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.736435 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q2b9x" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.736494 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerDied","Data":"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.736556 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2b9x" event={"ID":"95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2","Type":"ContainerDied","Data":"496b38fe99d47e7afff33267a4d60ec89c7372584f8a77d9da61629b88884530"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.739668 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f" exitCode=0 Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.739708 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.739733 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7"} Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.768145 4784 scope.go:117] "RemoveContainer" containerID="0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.774737 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.777660 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q2b9x"] Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.793612 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.802601 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l4mh8"] Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.803757 4784 scope.go:117] "RemoveContainer" containerID="b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.848276 4784 scope.go:117] "RemoveContainer" containerID="d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.849064 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956\": container with ID starting with d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956 not found: ID does not exist" containerID="d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.849113 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956"} err="failed to get container status \"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956\": rpc error: code = NotFound desc = could not find container \"d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956\": container with ID starting with d16cc841daec82058415447388936ee6efb5bb015691a454aefc8b5c2279a956 not found: ID does not exist" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.849209 4784 scope.go:117] "RemoveContainer" containerID="0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.849835 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8\": container with ID starting with 0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8 not found: ID does not exist" containerID="0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.849861 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8"} err="failed to get container status \"0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8\": rpc error: code = NotFound desc = could not find container \"0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8\": container with ID starting with 0d371f1b6b07ad968f79146bd387c138949fd14ad75734af87e9638a7b8eebe8 not found: ID does not exist" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.849878 4784 scope.go:117] "RemoveContainer" containerID="b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.850411 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24\": container with ID starting with b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24 not found: ID does not exist" containerID="b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.850482 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24"} err="failed to get container status \"b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24\": rpc error: code = NotFound desc = could not find container \"b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24\": container with ID starting with b8b52196551a4f27f5f48330cb6bb0d2d4d88eb67c737ca88977b6818c94cc24 not found: ID does not exist" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.850537 4784 scope.go:117] "RemoveContainer" containerID="8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.881259 4784 scope.go:117] "RemoveContainer" containerID="dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.901642 4784 scope.go:117] "RemoveContainer" containerID="e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.929998 4784 
scope.go:117] "RemoveContainer" containerID="8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.930698 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9\": container with ID starting with 8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9 not found: ID does not exist" containerID="8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.930770 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9"} err="failed to get container status \"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9\": rpc error: code = NotFound desc = could not find container \"8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9\": container with ID starting with 8f6a5e2c612441040410a3a9a2ec8706464baae31c81f10fbe553cf63d70c7c9 not found: ID does not exist" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.930814 4784 scope.go:117] "RemoveContainer" containerID="dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.931403 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35\": container with ID starting with dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35 not found: ID does not exist" containerID="dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.931491 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35"} err="failed to get container status \"dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35\": rpc error: code = NotFound desc = could not find container \"dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35\": container with ID starting with dbbd59cb346a7e83c25addb28fc441a45e14323e6617827618ab005b46a6ed35 not found: ID does not exist" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.931538 4784 scope.go:117] "RemoveContainer" containerID="e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db" Jan 06 08:18:46 crc kubenswrapper[4784]: E0106 08:18:46.932155 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db\": container with ID starting with e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db not found: ID does not exist" containerID="e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db" Jan 06 08:18:46 crc kubenswrapper[4784]: I0106 08:18:46.932235 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db"} err="failed to get container status \"e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db\": rpc error: code = NotFound desc = could not find container \"e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db\": container with ID starting with 
e1053e7b44abe5ac456b5a44771d20f45cdd3e5507f68e2d433042e72f9639db not found: ID does not exist" Jan 06 08:18:48 crc kubenswrapper[4784]: I0106 08:18:48.324166 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" path="/var/lib/kubelet/pods/95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2/volumes" Jan 06 08:18:48 crc kubenswrapper[4784]: I0106 08:18:48.325522 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" path="/var/lib/kubelet/pods/f69c40cb-62ee-4bcd-b08a-89e767dcac83/volumes" Jan 06 08:18:48 crc kubenswrapper[4784]: I0106 08:18:48.710408 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" podUID="86a7348c-24b4-4138-83a9-0587e28e72e4" containerName="oauth-openshift" containerID="cri-o://b221fcd4b37b92e1e240a994e989bf8480099df131253fc56f5d95560d0da85b" gracePeriod=15 Jan 06 08:18:49 crc kubenswrapper[4784]: I0106 08:18:49.770679 4784 generic.go:334] "Generic (PLEG): container finished" podID="86a7348c-24b4-4138-83a9-0587e28e72e4" containerID="b221fcd4b37b92e1e240a994e989bf8480099df131253fc56f5d95560d0da85b" exitCode=0 Jan 06 08:18:49 crc kubenswrapper[4784]: I0106 08:18:49.770775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" event={"ID":"86a7348c-24b4-4138-83a9-0587e28e72e4","Type":"ContainerDied","Data":"b221fcd4b37b92e1e240a994e989bf8480099df131253fc56f5d95560d0da85b"} Jan 06 08:18:49 crc kubenswrapper[4784]: I0106 08:18:49.987368 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.294666 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464402 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464529 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464627 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464691 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464735 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464792 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb6d7\" (UniqueName: \"kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464845 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464894 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.464968 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: 
I0106 08:18:50.465043 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.465109 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.465158 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.465229 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.465305 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data\") pod \"86a7348c-24b4-4138-83a9-0587e28e72e4\" (UID: \"86a7348c-24b4-4138-83a9-0587e28e72e4\") " Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.466260 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-69879bb87d-g5vgw"] Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.467611 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.468530 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.468521 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469032 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469112 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469868 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469894 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469904 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469912 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469920 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469928 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469940 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469946 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469959 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" containerName="pruner" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469966 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" containerName="pruner" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.469974 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.469980 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.470051 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" 
containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470059 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.470074 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470082 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="extract-content" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.470092 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470100 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="extract-utilities" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.470109 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86a7348c-24b4-4138-83a9-0587e28e72e4" containerName="oauth-openshift" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470117 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="86a7348c-24b4-4138-83a9-0587e28e72e4" containerName="oauth-openshift" Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.470129 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470136 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470367 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="86a7348c-24b4-4138-83a9-0587e28e72e4" containerName="oauth-openshift" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470385 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69c40cb-62ee-4bcd-b08a-89e767dcac83" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470395 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="95cc1e10-8fd8-41f6-9b00-51f6f56a3cf2" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470409 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="01361f8d-987c-43b9-ab45-ee19576a9b90" containerName="registry-server" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.470418 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="73f452b9-fdbb-4b29-8cbb-12e2fdc1268a" containerName="pruner" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.474944 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.483350 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7" (OuterVolumeSpecName: "kube-api-access-nb6d7") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "kube-api-access-nb6d7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.484067 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.484484 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.485080 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.485684 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.485986 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.486866 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.487652 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.490492 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "86a7348c-24b4-4138-83a9-0587e28e72e4" (UID: "86a7348c-24b4-4138-83a9-0587e28e72e4"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.490509 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69879bb87d-g5vgw"] Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566693 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566732 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566750 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb6d7\" (UniqueName: \"kubernetes.io/projected/86a7348c-24b4-4138-83a9-0587e28e72e4-kube-api-access-nb6d7\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566764 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566779 4784 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566794 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566807 4784 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566819 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566829 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566842 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566857 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566871 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566883 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.566895 4784 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86a7348c-24b4-4138-83a9-0587e28e72e4-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.668824 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.668892 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.668924 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.668958 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x52hc\" (UniqueName: \"kubernetes.io/projected/727232b2-b4ee-41c8-8d87-1a3b366620ba-kube-api-access-x52hc\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669048 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: 
\"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669075 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669101 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-policies\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669132 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-dir\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669176 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669237 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669273 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669299 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669325 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.669392 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770526 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-policies\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770594 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-dir\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770635 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770696 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770724 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770753 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770778 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770811 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770845 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770855 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-dir\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.770897 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.772530 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.772580 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.772606 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x52hc\" (UniqueName: \"kubernetes.io/projected/727232b2-b4ee-41c8-8d87-1a3b366620ba-kube-api-access-x52hc\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.772647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.773195 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-audit-policies\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.773465 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.773505 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.773918 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.778359 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.778734 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.781916 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" event={"ID":"86a7348c-24b4-4138-83a9-0587e28e72e4","Type":"ContainerDied","Data":"49822981b20da786ce18453d8fd73e4d7b4b0369ae67a380ac0a52ee9635e055"} Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.781997 4784 scope.go:117] "RemoveContainer" containerID="b221fcd4b37b92e1e240a994e989bf8480099df131253fc56f5d95560d0da85b" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.782006 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pln6n" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.782566 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.786492 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.787464 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.787526 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.787714 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.790892 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/727232b2-b4ee-41c8-8d87-1a3b366620ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.802075 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x52hc\" (UniqueName: \"kubernetes.io/projected/727232b2-b4ee-41c8-8d87-1a3b366620ba-kube-api-access-x52hc\") pod \"oauth-openshift-69879bb87d-g5vgw\" (UID: \"727232b2-b4ee-41c8-8d87-1a3b366620ba\") " pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.845518 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.856094 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:18:50 crc kubenswrapper[4784]: I0106 08:18:50.860965 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pln6n"] Jan 06 08:18:50 crc kubenswrapper[4784]: E0106 08:18:50.910210 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86a7348c_24b4_4138_83a9_0587e28e72e4.slice/crio-49822981b20da786ce18453d8fd73e4d7b4b0369ae67a380ac0a52ee9635e055\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86a7348c_24b4_4138_83a9_0587e28e72e4.slice\": RecentStats: unable to find data in memory cache]" Jan 06 08:18:51 crc kubenswrapper[4784]: I0106 08:18:51.324717 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69879bb87d-g5vgw"] Jan 06 08:18:51 crc kubenswrapper[4784]: W0106 08:18:51.339783 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod727232b2_b4ee_41c8_8d87_1a3b366620ba.slice/crio-02b993b52f9fc9024ff6f59cb0a47a621a5d8ac7d8ce60cd241bf7c407fcb1b3 WatchSource:0}: Error finding container 02b993b52f9fc9024ff6f59cb0a47a621a5d8ac7d8ce60cd241bf7c407fcb1b3: Status 404 returned error can't find the container with id 02b993b52f9fc9024ff6f59cb0a47a621a5d8ac7d8ce60cd241bf7c407fcb1b3 Jan 06 08:18:51 crc kubenswrapper[4784]: I0106 08:18:51.793769 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" event={"ID":"727232b2-b4ee-41c8-8d87-1a3b366620ba","Type":"ContainerStarted","Data":"7a5e8d2c328468ec0e151dd075c27ba1baa4eb7a997408739b9b51800a9afa89"} Jan 06 08:18:51 crc kubenswrapper[4784]: I0106 08:18:51.794367 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" event={"ID":"727232b2-b4ee-41c8-8d87-1a3b366620ba","Type":"ContainerStarted","Data":"02b993b52f9fc9024ff6f59cb0a47a621a5d8ac7d8ce60cd241bf7c407fcb1b3"} Jan 06 08:18:51 crc kubenswrapper[4784]: I0106 08:18:51.794392 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:51 crc kubenswrapper[4784]: I0106 08:18:51.821658 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" podStartSLOduration=28.821633358 podStartE2EDuration="28.821633358s" podCreationTimestamp="2026-01-06 08:18:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:18:51.817678559 +0000 UTC m=+233.863851406" watchObservedRunningTime="2026-01-06 08:18:51.821633358 +0000 UTC m=+233.867806195" Jan 06 08:18:52 crc kubenswrapper[4784]: I0106 08:18:52.168016 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-69879bb87d-g5vgw" Jan 06 08:18:52 crc kubenswrapper[4784]: I0106 08:18:52.321274 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="86a7348c-24b4-4138-83a9-0587e28e72e4" path="/var/lib/kubelet/pods/86a7348c-24b4-4138-83a9-0587e28e72e4/volumes" Jan 06 08:18:52 crc kubenswrapper[4784]: I0106 08:18:52.941738 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:18:53 crc kubenswrapper[4784]: I0106 08:18:53.010843 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:18:53 crc kubenswrapper[4784]: I0106 08:18:53.259937 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:53 crc kubenswrapper[4784]: I0106 08:18:53.307320 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:54 crc kubenswrapper[4784]: I0106 08:18:54.547524 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:18:54 crc kubenswrapper[4784]: I0106 08:18:54.813085 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vph2c" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="registry-server" containerID="cri-o://d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff" gracePeriod=2 Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.776020 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.821704 4784 generic.go:334] "Generic (PLEG): container finished" podID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerID="d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff" exitCode=0 Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.821784 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vph2c" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.821782 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerDied","Data":"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff"} Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.822061 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vph2c" event={"ID":"e179e7e8-0210-4afe-9fe2-cf76289c135c","Type":"ContainerDied","Data":"ac636755247a7edcb4827798a9708c08334355dae9a1817bb8fd0cf7f876d2d3"} Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.822137 4784 scope.go:117] "RemoveContainer" containerID="d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.847538 4784 scope.go:117] "RemoveContainer" containerID="203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.881508 4784 scope.go:117] "RemoveContainer" containerID="76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.901521 4784 scope.go:117] "RemoveContainer" containerID="d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff" Jan 06 08:18:55 crc kubenswrapper[4784]: E0106 08:18:55.905101 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff\": container with ID starting with d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff not found: ID does not exist" containerID="d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.905248 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff"} err="failed to get container status \"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff\": rpc error: code = NotFound desc = could not find container \"d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff\": container with ID starting with d53f74ca56413ed672c396a8a87b32564640e040a115c0c2a1b16731789f01ff not found: ID does not exist" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.905296 4784 scope.go:117] "RemoveContainer" containerID="203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb" Jan 06 08:18:55 crc kubenswrapper[4784]: E0106 08:18:55.905923 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb\": container with ID starting with 203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb not found: ID does not exist" containerID="203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.905954 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb"} err="failed to get container status \"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb\": rpc error: code = NotFound desc = could not find container 
\"203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb\": container with ID starting with 203826463f9532d764610fb191635af0a280b9d4be58609afb8e34d054ab90eb not found: ID does not exist" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.905975 4784 scope.go:117] "RemoveContainer" containerID="76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320" Jan 06 08:18:55 crc kubenswrapper[4784]: E0106 08:18:55.906572 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320\": container with ID starting with 76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320 not found: ID does not exist" containerID="76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.906611 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320"} err="failed to get container status \"76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320\": rpc error: code = NotFound desc = could not find container \"76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320\": container with ID starting with 76276fc720381770fc564f5eff49be2d83a888cb01205195199c24dea796b320 not found: ID does not exist" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.924353 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities\") pod \"e179e7e8-0210-4afe-9fe2-cf76289c135c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.924616 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content\") pod \"e179e7e8-0210-4afe-9fe2-cf76289c135c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.924707 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb7tc\" (UniqueName: \"kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc\") pod \"e179e7e8-0210-4afe-9fe2-cf76289c135c\" (UID: \"e179e7e8-0210-4afe-9fe2-cf76289c135c\") " Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.925967 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities" (OuterVolumeSpecName: "utilities") pod "e179e7e8-0210-4afe-9fe2-cf76289c135c" (UID: "e179e7e8-0210-4afe-9fe2-cf76289c135c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:55 crc kubenswrapper[4784]: I0106 08:18:55.933991 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc" (OuterVolumeSpecName: "kube-api-access-jb7tc") pod "e179e7e8-0210-4afe-9fe2-cf76289c135c" (UID: "e179e7e8-0210-4afe-9fe2-cf76289c135c"). InnerVolumeSpecName "kube-api-access-jb7tc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.026277 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb7tc\" (UniqueName: \"kubernetes.io/projected/e179e7e8-0210-4afe-9fe2-cf76289c135c-kube-api-access-jb7tc\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.026332 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.074932 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e179e7e8-0210-4afe-9fe2-cf76289c135c" (UID: "e179e7e8-0210-4afe-9fe2-cf76289c135c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.128013 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e179e7e8-0210-4afe-9fe2-cf76289c135c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.165500 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.170019 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vph2c"] Jan 06 08:18:56 crc kubenswrapper[4784]: I0106 08:18:56.323986 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" path="/var/lib/kubelet/pods/e179e7e8-0210-4afe-9fe2-cf76289c135c/volumes" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.809314 4784 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.810319 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="registry-server" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.810347 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="registry-server" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.810376 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="extract-content" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.810390 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="extract-content" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.810413 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="extract-utilities" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.810426 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="extract-utilities" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.810708 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e179e7e8-0210-4afe-9fe2-cf76289c135c" containerName="registry-server" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.811495 4784 
kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.811789 4784 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.811818 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.811995 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98" gracePeriod=15 Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812044 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff" gracePeriod=15 Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812088 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a" gracePeriod=15 Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812040 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2" gracePeriod=15 Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812088 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532" gracePeriod=15 Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.812930 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812976 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.812988 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.812997 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.813013 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813021 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-insecure-readyz" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.813033 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813040 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.813051 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813058 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.813071 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813079 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.813090 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813097 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813225 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813239 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813253 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813264 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.813274 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.815931 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.816999 4784 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.898923 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.900770 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.901388 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.901665 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.901832 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.901984 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.902278 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: I0106 08:18:57.902601 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:57 crc kubenswrapper[4784]: E0106 08:18:57.919664 4784 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.102:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009085 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009158 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009213 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009244 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009267 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009295 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009390 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009472 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009460 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009504 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009472 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009513 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009574 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009449 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.009589 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.220237 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: E0106 08:18:58.260868 4784 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.102:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188816734fd6e8af openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,LastTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.851659 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.853836 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.854790 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff" exitCode=0 Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.854818 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a" exitCode=0 Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.854826 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2" exitCode=0 Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.854834 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532" exitCode=2 Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.854923 4784 scope.go:117] "RemoveContainer" containerID="44a205fb174145bc853ffbff6152b92d782eab8dfd198c34300c71c4b8cc5cfd" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.856994 4784 generic.go:334] "Generic (PLEG): container finished" podID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" containerID="9b0a623f4c512cbf69decdc7ba6f650dd0d4ed837c27988e0d7d47c002e6e68c" exitCode=0 Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.857068 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4","Type":"ContainerDied","Data":"9b0a623f4c512cbf69decdc7ba6f650dd0d4ed837c27988e0d7d47c002e6e68c"} Jan 06 08:18:58 
crc kubenswrapper[4784]: I0106 08:18:58.857954 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.858428 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"305769838f06ffe5f19131b14f50bd21bf51899188c79bfb0f0b6986c618804f"} Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.858464 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"c43d95411b0a347640596c54d900da7830740f52e4a523ada84afc06b01525e0"} Jan 06 08:18:58 crc kubenswrapper[4784]: E0106 08:18:58.859444 4784 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.102:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:18:58 crc kubenswrapper[4784]: I0106 08:18:58.859629 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:18:59 crc kubenswrapper[4784]: I0106 08:18:59.869919 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.322523 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.323735 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.324124 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.324514 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.325385 4784 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.326015 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374694 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374801 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock\") pod \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374864 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374905 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access\") pod \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374897 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374956 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir\") pod \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\" (UID: \"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.374962 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375017 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375000 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock" (OuterVolumeSpecName: "var-lock") pod "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" (UID: "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375067 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" (UID: "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375172 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375403 4784 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375515 4784 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375577 4784 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375591 4784 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-var-lock\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.375602 4784 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.380908 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" (UID: "7b728c5c-d3f8-4dd1-bc84-306c50c17eb4"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.476660 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b728c5c-d3f8-4dd1-bc84-306c50c17eb4-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.888737 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.889729 4784 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98" exitCode=0 Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.889870 4784 scope.go:117] "RemoveContainer" containerID="14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.889911 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.893536 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b728c5c-d3f8-4dd1-bc84-306c50c17eb4","Type":"ContainerDied","Data":"de854cb6f57c269c6ae3a6500cd835fe85e706645ea57d78923402d57db70b7c"} Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.893596 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de854cb6f57c269c6ae3a6500cd835fe85e706645ea57d78923402d57db70b7c" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.893638 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.908995 4784 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.909976 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.911358 4784 scope.go:117] "RemoveContainer" containerID="c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.930885 4784 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.931327 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.943223 4784 scope.go:117] "RemoveContainer" containerID="ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.958199 4784 scope.go:117] "RemoveContainer" containerID="b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532" Jan 06 08:19:00 crc kubenswrapper[4784]: I0106 08:19:00.976636 4784 scope.go:117] "RemoveContainer" containerID="e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:00.997227 4784 scope.go:117] "RemoveContainer" containerID="173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.025165 4784 scope.go:117] "RemoveContainer" containerID="14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.045867 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\": container with ID starting with 14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff not found: ID does not exist" containerID="14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.045955 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff"} err="failed to get container status \"14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\": rpc error: code = NotFound desc = could not find container \"14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff\": container with ID starting with 14550ff7824cdc06b80ff88408d4491b7188fb7fa686de4d383d1606901dd6ff not found: ID does not exist" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.046011 4784 scope.go:117] "RemoveContainer" containerID="c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.046702 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\": container with ID starting with c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a not found: ID does not exist" containerID="c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.046819 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a"} err="failed to get container status \"c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\": rpc error: code = NotFound desc = could not find container \"c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a\": container with ID starting with c03570e9c76e3a2054aa67d147cfcd8d55f2e2af0cc7aca251e7b15e6c77fd2a not found: ID does not exist" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.046911 4784 scope.go:117] "RemoveContainer" containerID="ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.050070 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\": container with ID starting with ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2 not found: ID does not exist" containerID="ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.050130 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2"} err="failed to get container status \"ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\": rpc error: code = NotFound desc = could not find container \"ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2\": container with ID starting with ae387774c9b6816cde2e4eaa9df2103c02d1f7d74b0a17771825fc9f259f12a2 not found: ID does not exist" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.050161 4784 scope.go:117] "RemoveContainer" 
containerID="b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.050655 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\": container with ID starting with b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532 not found: ID does not exist" containerID="b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.050731 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532"} err="failed to get container status \"b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\": rpc error: code = NotFound desc = could not find container \"b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532\": container with ID starting with b562f7f1967ee3d075c48bc6aa6ae26e6ff45f93fed80c0039a1b6506103a532 not found: ID does not exist" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.050779 4784 scope.go:117] "RemoveContainer" containerID="e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.051189 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\": container with ID starting with e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98 not found: ID does not exist" containerID="e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.051311 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98"} err="failed to get container status \"e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\": rpc error: code = NotFound desc = could not find container \"e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98\": container with ID starting with e97a79f723dfc3bbe8aed1b10eda9c67dddb4f0d9f7e95ecbe405021a3b2dd98 not found: ID does not exist" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.051399 4784 scope.go:117] "RemoveContainer" containerID="173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210" Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.051784 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\": container with ID starting with 173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210 not found: ID does not exist" containerID="173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210" Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.051925 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210"} err="failed to get container status \"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\": rpc error: code = NotFound desc = could not find container \"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\": container with ID starting with 
Jan 06 08:19:01 crc kubenswrapper[4784]: I0106 08:19:01.051925 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210"} err="failed to get container status \"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\": rpc error: code = NotFound desc = could not find container \"173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210\": container with ID starting with 173b43610540afd75c754f319732c819293a3630375c1b2954d0078ac30c6210 not found: ID does not exist"
Jan 06 08:19:01 crc kubenswrapper[4784]: E0106 08:19:01.398460 4784 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.102:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188816734fd6e8af openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,LastTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 06 08:19:02 crc kubenswrapper[4784]: I0106 08:19:02.320672 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.369750 4784 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.370238 4784 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.371182 4784 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.371758 4784 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.372138 4784 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:03 crc kubenswrapper[4784]: I0106 08:19:03.372182 4784 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.372452 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="200ms"
Jan 
06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.573152 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="400ms" Jan 06 08:19:03 crc kubenswrapper[4784]: E0106 08:19:03.976523 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="800ms" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.565991 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:19:04Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:19:04Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:19:04Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-06T08:19:04Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.566567 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.567222 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.567484 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.567847 4784 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.567891 4784 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 06 08:19:04 crc kubenswrapper[4784]: E0106 08:19:04.777985 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 
38.102.83.102:6443: connect: connection refused" interval="1.6s"
Jan 06 08:19:06 crc kubenswrapper[4784]: E0106 08:19:06.379126 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="3.2s"
Jan 06 08:19:08 crc kubenswrapper[4784]: I0106 08:19:08.315665 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:09 crc kubenswrapper[4784]: E0106 08:19:09.581204 4784 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.102:6443: connect: connection refused" interval="6.4s"
Jan 06 08:19:10 crc kubenswrapper[4784]: I0106 08:19:10.311828 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 06 08:19:10 crc kubenswrapper[4784]: I0106 08:19:10.312411 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused"
Jan 06 08:19:10 crc kubenswrapper[4784]: I0106 08:19:10.349293 4784 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c"
Jan 06 08:19:10 crc kubenswrapper[4784]: I0106 08:19:10.349357 4784 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c"
Jan 06 08:19:10 crc kubenswrapper[4784]: E0106 08:19:10.350215 4784 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
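
The repeated "Failed to ensure lease exists, will retry" entries show the node-lease controller backing off while api-int.crc.testing:6443 refuses connections during the apiserver restart: the interval doubles from 200ms through 400ms, 800ms, 1.6s and 3.2s to 6.4s. A minimal sketch of that doubling backoff (illustrative; the 7s ceiling and all names are assumptions for the sketch, not the kubelet's actual constants):

    package kubeletsketch

    import "time"

    // nextInterval doubles the retry delay after each failure, reproducing
    // the 200ms -> 400ms -> 800ms -> 1.6s -> 3.2s -> 6.4s progression in
    // the log, and clamps it at an assumed ceiling.
    func nextInterval(cur time.Duration) time.Duration {
        const (
            base     = 200 * time.Millisecond
            maxDelay = 7 * time.Second // assumption, not the kubelet's value
        )
        if cur < base {
            return base
        }
        if next := 2 * cur; next < maxDelay {
            return next
        }
        return maxDelay
    }

A retry loop would sleep nextInterval(cur) after each failed attempt and reset to zero (so the next delay is the 200ms base) once a lease call succeeds.
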
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:10 crc kubenswrapper[4784]: W0106 08:19:10.386481 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-f665ecf94f903739e93546faa95d9a0f760eb98bb8529216127c9c8b62279537 WatchSource:0}: Error finding container f665ecf94f903739e93546faa95d9a0f760eb98bb8529216127c9c8b62279537: Status 404 returned error can't find the container with id f665ecf94f903739e93546faa95d9a0f760eb98bb8529216127c9c8b62279537 Jan 06 08:19:10 crc kubenswrapper[4784]: I0106 08:19:10.975161 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f665ecf94f903739e93546faa95d9a0f760eb98bb8529216127c9c8b62279537"} Jan 06 08:19:11 crc kubenswrapper[4784]: E0106 08:19:11.400585 4784 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.102:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188816734fd6e8af openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,LastTimestamp:2026-01-06 08:18:58.259454127 +0000 UTC m=+240.305626994,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 06 08:19:11 crc kubenswrapper[4784]: I0106 08:19:11.986684 4784 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="1e53472789cbb8c965fb0e4260010785e9a1f451ef14340b047d201fdce2cbde" exitCode=0 Jan 06 08:19:11 crc kubenswrapper[4784]: I0106 08:19:11.986804 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"1e53472789cbb8c965fb0e4260010785e9a1f451ef14340b047d201fdce2cbde"} Jan 06 08:19:11 crc kubenswrapper[4784]: I0106 08:19:11.987415 4784 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:11 crc kubenswrapper[4784]: I0106 08:19:11.987754 4784 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:11 crc kubenswrapper[4784]: I0106 08:19:11.987981 4784 status_manager.go:851] "Failed to get status for pod" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" Jan 06 08:19:11 crc kubenswrapper[4784]: E0106 
08:19:11.988122 4784 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.102:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:12 crc kubenswrapper[4784]: E0106 08:19:12.339973 4784 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.102:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" volumeName="registry-storage" Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.030268 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fdf39065a4f8cdffe184485c03d43b4d51cb5214dc1fec286e41b0f6cb967a21"} Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.031123 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f9d1d22e8062a8ed2b8d360f5a9345070ef3e9de431a58289bd0b27507554d2a"} Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.033959 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.034038 4784 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966" exitCode=1 Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.034079 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966"} Jan 06 08:19:13 crc kubenswrapper[4784]: I0106 08:19:13.035419 4784 scope.go:117] "RemoveContainer" containerID="f8e21165f5f253ada171adc7d7e52b1b01f1e9e0922e44e2321608205ead6966" Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.045411 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.046113 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2da5d1a81c2a99589beb8da784875de967b06d3e8107db3bfb0497913868d8ed"} Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051389 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ae43bc920c46a9c0bd9fb81f2438d995a6f22c9e248ab11a12c838d1786fcf2d"} Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051474 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7d59cb941859249e037a6ae5780d4cd897c03520fd693443ee6827cff302a3e9"} Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051500 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5b8db8b57a0d73bab166f7240e91e29bcb68b60ca95607a49ffb171d20c7ab57"} Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051628 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051780 4784 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:14 crc kubenswrapper[4784]: I0106 08:19:14.051817 4784 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.351346 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.352189 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.359742 4784 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]log ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]etcd ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-api-request-count-filter ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-startkubeinformers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/generic-apiserver-start-informers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/priority-and-fairness-config-consumer ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/priority-and-fairness-filter ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-apiextensions-informers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-apiextensions-controllers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/crd-informer-synced ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-system-namespaces-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-cluster-authentication-info-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Jan 06 08:19:15 crc kubenswrapper[4784]: 
[+]poststarthook/start-legacy-token-tracking-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-service-ip-repair-controllers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/priority-and-fairness-config-producer ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/bootstrap-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/start-kube-aggregator-informers ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-status-local-available-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-status-remote-available-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-registration-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-wait-for-first-sync ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-discovery-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/kube-apiserver-autoregistration ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]autoregister-completion ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-openapi-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: [+]poststarthook/apiservice-openapiv3-controller ok Jan 06 08:19:15 crc kubenswrapper[4784]: livez check failed Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.359851 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.583337 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.584473 4784 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 06 08:19:15 crc kubenswrapper[4784]: I0106 08:19:15.584523 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 06 08:19:19 crc kubenswrapper[4784]: I0106 08:19:19.325336 4784 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:19 crc kubenswrapper[4784]: I0106 08:19:19.455032 4784 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="808360b8-bdd0-4156-b1f7-e5f470f6cd14" Jan 06 08:19:19 crc kubenswrapper[4784]: I0106 08:19:19.760137 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:19:20 crc kubenswrapper[4784]: I0106 08:19:20.092089 4784 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:20 crc kubenswrapper[4784]: I0106 08:19:20.092515 4784 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:20 crc kubenswrapper[4784]: I0106 08:19:20.096164 4784 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="808360b8-bdd0-4156-b1f7-e5f470f6cd14" Jan 06 08:19:25 crc kubenswrapper[4784]: I0106 08:19:25.583343 4784 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 06 08:19:25 crc kubenswrapper[4784]: I0106 08:19:25.586243 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 06 08:19:29 crc kubenswrapper[4784]: I0106 08:19:29.451292 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 06 08:19:29 crc kubenswrapper[4784]: I0106 08:19:29.604338 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 06 08:19:29 crc kubenswrapper[4784]: I0106 08:19:29.696965 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 06 08:19:30 crc kubenswrapper[4784]: I0106 08:19:29.999529 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 06 08:19:30 crc kubenswrapper[4784]: I0106 08:19:30.961574 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.030391 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.175197 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.240970 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.263467 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.490167 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.698322 4784 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.732576 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.911106 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 06 08:19:31 crc kubenswrapper[4784]: I0106 08:19:31.955982 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.061252 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.079384 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.088035 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.157673 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.285440 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.454360 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.487660 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.500084 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.519733 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.557252 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.619278 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.693461 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.920256 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.928807 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.944014 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.978460 4784 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.985610 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 06 08:19:32 crc kubenswrapper[4784]: I0106 08:19:32.995158 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.018319 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.062154 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.096168 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.154596 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.476721 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.496328 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.643156 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.647236 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.676384 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.774670 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.863130 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.866536 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.918432 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 06 08:19:33 crc kubenswrapper[4784]: I0106 08:19:33.924496 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.025666 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.058771 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.137003 4784 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.287871 4784 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.395361 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.414415 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.487666 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.512041 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.852921 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.892021 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 06 08:19:34 crc kubenswrapper[4784]: I0106 08:19:34.910917 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.097743 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.144966 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.170067 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.170085 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.212842 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.223284 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.229826 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.258760 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.370582 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.370708 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.376730 4784 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"serving-cert" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.390795 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.403476 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.538236 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.578724 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.589193 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.591181 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.597717 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.652607 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.655916 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.708653 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.753690 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.817945 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.827350 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.844519 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.851559 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.853724 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.867710 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.911330 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 06 08:19:35 crc kubenswrapper[4784]: I0106 08:19:35.971603 4784 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.020511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.089830 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.097515 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.276518 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.300160 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.311912 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.314785 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.334384 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.389501 4784 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.400726 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.400915 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.401624 4784 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.401677 4784 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7154710e-52c4-45a9-8a9d-87cf8ea16f5c" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.407648 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.430424 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.430398654 podStartE2EDuration="17.430398654s" podCreationTimestamp="2026-01-06 08:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:19:36.426241512 +0000 UTC m=+278.472414389" watchObservedRunningTime="2026-01-06 08:19:36.430398654 +0000 UTC m=+278.476571521" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.473439 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.527683 4784 reflector.go:368] 
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.559649 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.583203 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.595196 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.655425 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.696189 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.758487 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.798319 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.816183 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.902823 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.940804 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 06 08:19:36 crc kubenswrapper[4784]: I0106 08:19:36.961636 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.081510 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.081707 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.114779 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.176015 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.224346 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.312010 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.319076 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.495250 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.529511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.668513 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.716399 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.741598 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.813644 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 06 08:19:37 crc kubenswrapper[4784]: I0106 08:19:37.993119 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.028298 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.136815 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.169959 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.277244 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.398256 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.471853 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.474887 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.492970 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.494716 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.526085 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.528828 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.544992 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.561200 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.679669 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.730764 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.747507 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.838653 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.854591 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.894709 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Jan 06 08:19:38 crc kubenswrapper[4784]: I0106 08:19:38.982609 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.015016 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.095043 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.151047 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.156789 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.278119 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.515046 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.515248 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.615285 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.704342 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.705089 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.755076 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
from object-"openshift-image-registry"/"trusted-ca" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.766313 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.798351 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.922501 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.941125 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.970147 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 06 08:19:39 crc kubenswrapper[4784]: I0106 08:19:39.986360 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.012811 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.106012 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.133673 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.154024 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.162709 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.216878 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.259514 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.327155 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.358648 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.364685 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.380163 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.444424 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.491327 4784 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.507348 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.524776 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.549202 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.577654 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.585394 4784 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.726653 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.728503 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.752786 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.762784 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.814486 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.874930 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.887475 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.911584 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.973728 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 06 08:19:40 crc kubenswrapper[4784]: I0106 08:19:40.996794 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.011736 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.050718 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.080987 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.135468 4784 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.218472 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.228487 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.259753 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.441408 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.471529 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.548884 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.590754 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.601895 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.611109 4784 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.654610 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.688260 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.714109 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.819894 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.882263 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.888156 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.904092 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.918159 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 06 08:19:41 crc kubenswrapper[4784]: I0106 08:19:41.995298 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.062300 4784 kubelet.go:2431] "SyncLoop REMOVE" source="file" 
pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.062786 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://305769838f06ffe5f19131b14f50bd21bf51899188c79bfb0f0b6986c618804f" gracePeriod=5 Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.079063 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.110149 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.140341 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.382102 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.537870 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.640102 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.688724 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.750805 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.868469 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.889916 4784 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.926366 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 06 08:19:42 crc kubenswrapper[4784]: I0106 08:19:42.946571 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.002283 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.016248 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.112823 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.117677 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 06 
08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.147969 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.157295 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.206962 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.375182 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.440308 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.446002 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.453046 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.470029 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.584042 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.624787 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.629789 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.757339 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 06 08:19:43 crc kubenswrapper[4784]: I0106 08:19:43.937146 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.050601 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.153527 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.225078 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.351083 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.384814 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.527002 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 
08:19:44.748417 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.788322 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.935012 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 06 08:19:44 crc kubenswrapper[4784]: I0106 08:19:44.943530 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 06 08:19:45 crc kubenswrapper[4784]: I0106 08:19:45.177895 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 06 08:19:45 crc kubenswrapper[4784]: I0106 08:19:45.512433 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 06 08:19:45 crc kubenswrapper[4784]: I0106 08:19:45.527897 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 06 08:19:45 crc kubenswrapper[4784]: I0106 08:19:45.561893 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 06 08:19:45 crc kubenswrapper[4784]: I0106 08:19:45.852948 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 06 08:19:46 crc kubenswrapper[4784]: I0106 08:19:46.082278 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 06 08:19:46 crc kubenswrapper[4784]: I0106 08:19:46.872356 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.148251 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.148533 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jgfwh" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="registry-server" containerID="cri-o://edc01d92310dc24dfe3255462cc87817661581d3d6b76fdab6cd293aade7e070" gracePeriod=30 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.156976 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.157397 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8l2cs" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="registry-server" containerID="cri-o://a970ffb935c82d0563173431d519993eaf34e48c059bf132cedab43e885d76cb" gracePeriod=30 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.187342 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"] Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.187694 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" 
podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" containerID="cri-o://cd1b1691f2343bdaab939532ded996460d6a9f7233c48e8807f0e912318e08b7" gracePeriod=30 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.217634 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.217974 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ffzbc" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="registry-server" containerID="cri-o://5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2" gracePeriod=30 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.236049 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.236382 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wv9d2" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="registry-server" containerID="cri-o://e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6" gracePeriod=30 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.305393 4784 generic.go:334] "Generic (PLEG): container finished" podID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerID="cd1b1691f2343bdaab939532ded996460d6a9f7233c48e8807f0e912318e08b7" exitCode=0 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.305455 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" event={"ID":"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97","Type":"ContainerDied","Data":"cd1b1691f2343bdaab939532ded996460d6a9f7233c48e8807f0e912318e08b7"} Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.307648 4784 generic.go:334] "Generic (PLEG): container finished" podID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerID="a970ffb935c82d0563173431d519993eaf34e48c059bf132cedab43e885d76cb" exitCode=0 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.307689 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerDied","Data":"a970ffb935c82d0563173431d519993eaf34e48c059bf132cedab43e885d76cb"} Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.309863 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6eac71f-1d65-4542-9722-211fee770bba" containerID="edc01d92310dc24dfe3255462cc87817661581d3d6b76fdab6cd293aade7e070" exitCode=0 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.309906 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerDied","Data":"edc01d92310dc24dfe3255462cc87817661581d3d6b76fdab6cd293aade7e070"} Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.311258 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.311291 4784 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="305769838f06ffe5f19131b14f50bd21bf51899188c79bfb0f0b6986c618804f" exitCode=137 Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 
08:19:47.618289 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.623754 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.623860 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.636806 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.643476 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.648808 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700353 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities\") pod \"77358ffe-6346-4747-9847-27c607f4a2a3\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700402 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content\") pod \"d8eba439-a397-4536-8b2e-cde21cfc1384\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700474 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700517 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzd2v\" (UniqueName: \"kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v\") pod \"d8eba439-a397-4536-8b2e-cde21cfc1384\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700559 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700609 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics\") pod \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700643 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities\") pod \"d8eba439-a397-4536-8b2e-cde21cfc1384\" (UID: \"d8eba439-a397-4536-8b2e-cde21cfc1384\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700666 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content\") pod \"77358ffe-6346-4747-9847-27c607f4a2a3\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700688 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srw6m\" (UniqueName: \"kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m\") pod \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700721 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700758 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700785 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content\") pod \"971db4c5-67a0-42f5-b71f-042c91d097b5\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700821 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p759p\" (UniqueName: \"kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p\") pod \"971db4c5-67a0-42f5-b71f-042c91d097b5\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700856 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca\") pod \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\" (UID: \"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700880 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities\") pod \"971db4c5-67a0-42f5-b71f-042c91d097b5\" (UID: \"971db4c5-67a0-42f5-b71f-042c91d097b5\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.700908 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h46l\" (UniqueName: 
\"kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l\") pod \"77358ffe-6346-4747-9847-27c607f4a2a3\" (UID: \"77358ffe-6346-4747-9847-27c607f4a2a3\") " Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.702116 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.702211 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities" (OuterVolumeSpecName: "utilities") pod "77358ffe-6346-4747-9847-27c607f4a2a3" (UID: "77358ffe-6346-4747-9847-27c607f4a2a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.702202 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.702269 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.703211 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" (UID: "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.703279 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.703810 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities" (OuterVolumeSpecName: "utilities") pod "d8eba439-a397-4536-8b2e-cde21cfc1384" (UID: "d8eba439-a397-4536-8b2e-cde21cfc1384"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.704618 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities" (OuterVolumeSpecName: "utilities") pod "971db4c5-67a0-42f5-b71f-042c91d097b5" (UID: "971db4c5-67a0-42f5-b71f-042c91d097b5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.708584 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l" (OuterVolumeSpecName: "kube-api-access-7h46l") pod "77358ffe-6346-4747-9847-27c607f4a2a3" (UID: "77358ffe-6346-4747-9847-27c607f4a2a3"). InnerVolumeSpecName "kube-api-access-7h46l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.709649 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v" (OuterVolumeSpecName: "kube-api-access-wzd2v") pod "d8eba439-a397-4536-8b2e-cde21cfc1384" (UID: "d8eba439-a397-4536-8b2e-cde21cfc1384"). InnerVolumeSpecName "kube-api-access-wzd2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.710521 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p" (OuterVolumeSpecName: "kube-api-access-p759p") pod "971db4c5-67a0-42f5-b71f-042c91d097b5" (UID: "971db4c5-67a0-42f5-b71f-042c91d097b5"). InnerVolumeSpecName "kube-api-access-p759p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.711282 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" (UID: "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.711935 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m" (OuterVolumeSpecName: "kube-api-access-srw6m") pod "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" (UID: "47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97"). InnerVolumeSpecName "kube-api-access-srw6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.713025 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.739006 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "971db4c5-67a0-42f5-b71f-042c91d097b5" (UID: "971db4c5-67a0-42f5-b71f-042c91d097b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.767412 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8eba439-a397-4536-8b2e-cde21cfc1384" (UID: "d8eba439-a397-4536-8b2e-cde21cfc1384"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803381 4784 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803438 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzd2v\" (UniqueName: \"kubernetes.io/projected/d8eba439-a397-4536-8b2e-cde21cfc1384-kube-api-access-wzd2v\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803453 4784 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803461 4784 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803470 4784 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803480 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803488 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srw6m\" (UniqueName: \"kubernetes.io/projected/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-kube-api-access-srw6m\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803496 4784 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803516 4784 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803525 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803532 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p759p\" (UniqueName: \"kubernetes.io/projected/971db4c5-67a0-42f5-b71f-042c91d097b5-kube-api-access-p759p\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803565 4784 reconciler_common.go:293] "Volume 
detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803579 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971db4c5-67a0-42f5-b71f-042c91d097b5-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803589 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h46l\" (UniqueName: \"kubernetes.io/projected/77358ffe-6346-4747-9847-27c607f4a2a3-kube-api-access-7h46l\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803602 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.803613 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8eba439-a397-4536-8b2e-cde21cfc1384-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.841921 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77358ffe-6346-4747-9847-27c607f4a2a3" (UID: "77358ffe-6346-4747-9847-27c607f4a2a3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:47 crc kubenswrapper[4784]: I0106 08:19:47.904619 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77358ffe-6346-4747-9847-27c607f4a2a3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.027129 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.108032 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content\") pod \"b6eac71f-1d65-4542-9722-211fee770bba\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.108194 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities\") pod \"b6eac71f-1d65-4542-9722-211fee770bba\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.108322 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7dlj\" (UniqueName: \"kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj\") pod \"b6eac71f-1d65-4542-9722-211fee770bba\" (UID: \"b6eac71f-1d65-4542-9722-211fee770bba\") " Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.110331 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities" (OuterVolumeSpecName: "utilities") pod "b6eac71f-1d65-4542-9722-211fee770bba" (UID: "b6eac71f-1d65-4542-9722-211fee770bba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.114535 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj" (OuterVolumeSpecName: "kube-api-access-l7dlj") pod "b6eac71f-1d65-4542-9722-211fee770bba" (UID: "b6eac71f-1d65-4542-9722-211fee770bba"). InnerVolumeSpecName "kube-api-access-l7dlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.169770 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b6eac71f-1d65-4542-9722-211fee770bba" (UID: "b6eac71f-1d65-4542-9722-211fee770bba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.210147 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7dlj\" (UniqueName: \"kubernetes.io/projected/b6eac71f-1d65-4542-9722-211fee770bba-kube-api-access-l7dlj\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.210185 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.210198 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6eac71f-1d65-4542-9722-211fee770bba-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.324619 4784 generic.go:334] "Generic (PLEG): container finished" podID="77358ffe-6346-4747-9847-27c607f4a2a3" containerID="e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6" exitCode=0 Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.324784 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wv9d2" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.327429 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.330140 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerDied","Data":"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.330208 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wv9d2" event={"ID":"77358ffe-6346-4747-9847-27c607f4a2a3","Type":"ContainerDied","Data":"5616c201db1683d96f83b56c6988d487d94d98a9027d7251bd0501db2553fb01"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.330264 4784 scope.go:117] "RemoveContainer" containerID="e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.331121 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8l2cs" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.331107 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8l2cs" event={"ID":"d8eba439-a397-4536-8b2e-cde21cfc1384","Type":"ContainerDied","Data":"673f6efab892b18555717542f6022bfeb8e5a59e2021ba00b4a8410017ea8d55"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.340521 4784 generic.go:334] "Generic (PLEG): container finished" podID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerID="5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2" exitCode=0 Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.340778 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ffzbc" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.340737 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerDied","Data":"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.341151 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ffzbc" event={"ID":"971db4c5-67a0-42f5-b71f-042c91d097b5","Type":"ContainerDied","Data":"a94897c75ae571f9b917583df11a8083303c7b95409a762ef0c790818a749433"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.349358 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jgfwh" event={"ID":"b6eac71f-1d65-4542-9722-211fee770bba","Type":"ContainerDied","Data":"f1ddc8243b389cf95f5fc722170b444bdfdc8386940aad180160aa8e08661341"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.349486 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jgfwh" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.354915 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.355225 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.359830 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" event={"ID":"47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97","Type":"ContainerDied","Data":"10891dc501072be4bd8e8c67b9f94a3dfe66d77807a8f56248112a748b9660f6"} Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.359877 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vkt2h" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.362122 4784 scope.go:117] "RemoveContainer" containerID="fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.403904 4784 scope.go:117] "RemoveContainer" containerID="4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.432939 4784 scope.go:117] "RemoveContainer" containerID="e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.433654 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6\": container with ID starting with e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6 not found: ID does not exist" containerID="e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.433704 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6"} err="failed to get container status \"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6\": rpc error: code = NotFound desc = could not find container \"e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6\": container with ID starting with e6fe7b13e9b02dfc7edce1ce5ead850e3e2cbe5a800b6ef826423378d528d7b6 not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.433733 4784 scope.go:117] "RemoveContainer" containerID="fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.434489 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd\": container with ID starting with fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd not found: ID does not exist" containerID="fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.434589 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd"} err="failed to get container status \"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd\": rpc error: code = NotFound desc = could not find container \"fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd\": container with ID starting with fd2f1b2f3226e386bf9f5d94360e5e787ee63355f16aa08ee90fff37943aa4fd not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.434632 4784 scope.go:117] "RemoveContainer" containerID="4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.435077 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3\": container with ID starting with 4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3 not found: ID does not exist" 
containerID="4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.435124 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3"} err="failed to get container status \"4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3\": rpc error: code = NotFound desc = could not find container \"4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3\": container with ID starting with 4b79ae8a056f8e703e496873e609f88446489f4fb21f3cb52b445e006f940cf3 not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.435302 4784 scope.go:117] "RemoveContainer" containerID="a970ffb935c82d0563173431d519993eaf34e48c059bf132cedab43e885d76cb" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.459591 4784 scope.go:117] "RemoveContainer" containerID="03b86fdc94b556970b8cbcd0e4750d3e0b40be99a375de31781eb1b9de611602" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.467527 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.473236 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8l2cs"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.483505 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.487445 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ffzbc"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.501022 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.504699 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jgfwh"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.506214 4784 scope.go:117] "RemoveContainer" containerID="67bc8735f68736a0df59a183bd5f1b1dcd6efce20ef78be5582253c5799c46e5" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.512912 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.535752 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wv9d2"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.544063 4784 scope.go:117] "RemoveContainer" containerID="5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.549786 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.556584 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vkt2h"] Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.564482 4784 scope.go:117] "RemoveContainer" containerID="eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.579476 4784 scope.go:117] "RemoveContainer" containerID="eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.593729 
4784 scope.go:117] "RemoveContainer" containerID="5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.594104 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2\": container with ID starting with 5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2 not found: ID does not exist" containerID="5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.594154 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2"} err="failed to get container status \"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2\": rpc error: code = NotFound desc = could not find container \"5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2\": container with ID starting with 5ce5a9cdee08bce79dcf1b256e5891aca20fd27a854fc039981205c1613891d2 not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.594188 4784 scope.go:117] "RemoveContainer" containerID="eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.594591 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d\": container with ID starting with eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d not found: ID does not exist" containerID="eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.594644 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d"} err="failed to get container status \"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d\": rpc error: code = NotFound desc = could not find container \"eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d\": container with ID starting with eb574f8dc15bda5a7b93b7b35f3b24e7aa42545f4dd75ab2c8b197a9f3f4778d not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.594685 4784 scope.go:117] "RemoveContainer" containerID="eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54" Jan 06 08:19:48 crc kubenswrapper[4784]: E0106 08:19:48.595015 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54\": container with ID starting with eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54 not found: ID does not exist" containerID="eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.595050 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54"} err="failed to get container status \"eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54\": rpc error: code = NotFound desc = could not find container \"eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54\": container with ID starting with 
eee8e10a9f7671da51cd771e569dea538d14c83d2605d8a8cd3b127fc1538a54 not found: ID does not exist" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.595073 4784 scope.go:117] "RemoveContainer" containerID="edc01d92310dc24dfe3255462cc87817661581d3d6b76fdab6cd293aade7e070" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.610132 4784 scope.go:117] "RemoveContainer" containerID="d7e1e8933b5f35deb377922887e9b4816c5c691360bb5ad81b3b067388d71eb0" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.625217 4784 scope.go:117] "RemoveContainer" containerID="cc523b2086978fe5ad2e48d1215e9cbc8b60bdb3aff332ff09aad809a2eade1e" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.639907 4784 scope.go:117] "RemoveContainer" containerID="305769838f06ffe5f19131b14f50bd21bf51899188c79bfb0f0b6986c618804f" Jan 06 08:19:48 crc kubenswrapper[4784]: I0106 08:19:48.658040 4784 scope.go:117] "RemoveContainer" containerID="cd1b1691f2343bdaab939532ded996460d6a9f7233c48e8807f0e912318e08b7" Jan 06 08:19:50 crc kubenswrapper[4784]: I0106 08:19:50.319334 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" path="/var/lib/kubelet/pods/47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97/volumes" Jan 06 08:19:50 crc kubenswrapper[4784]: I0106 08:19:50.320828 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" path="/var/lib/kubelet/pods/77358ffe-6346-4747-9847-27c607f4a2a3/volumes" Jan 06 08:19:50 crc kubenswrapper[4784]: I0106 08:19:50.322089 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" path="/var/lib/kubelet/pods/971db4c5-67a0-42f5-b71f-042c91d097b5/volumes" Jan 06 08:19:50 crc kubenswrapper[4784]: I0106 08:19:50.324183 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6eac71f-1d65-4542-9722-211fee770bba" path="/var/lib/kubelet/pods/b6eac71f-1d65-4542-9722-211fee770bba/volumes" Jan 06 08:19:50 crc kubenswrapper[4784]: I0106 08:19:50.325305 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" path="/var/lib/kubelet/pods/d8eba439-a397-4536-8b2e-cde21cfc1384/volumes" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.314847 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mq9b8"] Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315489 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315506 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315518 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315526 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315535 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315556 4784 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315566 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315571 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315581 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315588 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315598 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315605 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315613 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" containerName="installer" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315620 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" containerName="installer" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315629 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315635 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315644 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315653 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315660 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315668 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315675 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315682 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315691 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315699 4784 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315711 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315717 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="extract-content" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315725 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315732 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="extract-utilities" Jan 06 08:19:53 crc kubenswrapper[4784]: E0106 08:19:53.315740 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315746 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315842 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315850 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="971db4c5-67a0-42f5-b71f-042c91d097b5" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315858 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8eba439-a397-4536-8b2e-cde21cfc1384" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315870 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="77358ffe-6346-4747-9847-27c607f4a2a3" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315881 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6eac71f-1d65-4542-9722-211fee770bba" containerName="registry-server" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315890 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="47c8d91e-1aa9-474c-ac9a-c1d4a43b0d97" containerName="marketplace-operator" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.315897 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b728c5c-d3f8-4dd1-bc84-306c50c17eb4" containerName="installer" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.316490 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.321337 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.321627 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.321772 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.323766 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.346034 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mq9b8"] Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.350308 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.403108 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpsfk\" (UniqueName: \"kubernetes.io/projected/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-kube-api-access-jpsfk\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.403232 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.403273 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.504720 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.504777 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.504832 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-jpsfk\" (UniqueName: \"kubernetes.io/projected/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-kube-api-access-jpsfk\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.507774 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.513771 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.524741 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpsfk\" (UniqueName: \"kubernetes.io/projected/51cd7ec7-c900-4f63-bcee-5f0f9e215e69-kube-api-access-jpsfk\") pod \"marketplace-operator-79b997595-mq9b8\" (UID: \"51cd7ec7-c900-4f63-bcee-5f0f9e215e69\") " pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.644500 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:53 crc kubenswrapper[4784]: I0106 08:19:53.855850 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mq9b8"] Jan 06 08:19:54 crc kubenswrapper[4784]: I0106 08:19:54.417903 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" event={"ID":"51cd7ec7-c900-4f63-bcee-5f0f9e215e69","Type":"ContainerStarted","Data":"a185a5dfe0e596869194933e04603096a8e80f7099cb8d975922a99635ecc75e"} Jan 06 08:19:54 crc kubenswrapper[4784]: I0106 08:19:54.418398 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" event={"ID":"51cd7ec7-c900-4f63-bcee-5f0f9e215e69","Type":"ContainerStarted","Data":"b8714be9268423edb92eecfe59377748ab956390a00099afbedd89382d57d36d"} Jan 06 08:19:54 crc kubenswrapper[4784]: I0106 08:19:54.418419 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:54 crc kubenswrapper[4784]: I0106 08:19:54.424913 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" Jan 06 08:19:54 crc kubenswrapper[4784]: I0106 08:19:54.446385 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mq9b8" podStartSLOduration=1.4463459379999999 podStartE2EDuration="1.446345938s" podCreationTimestamp="2026-01-06 08:19:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 
08:19:54.440885153 +0000 UTC m=+296.487057990" watchObservedRunningTime="2026-01-06 08:19:54.446345938 +0000 UTC m=+296.492518785" Jan 06 08:19:58 crc kubenswrapper[4784]: I0106 08:19:58.152502 4784 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 06 08:20:09 crc kubenswrapper[4784]: I0106 08:20:09.696607 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"] Jan 06 08:20:09 crc kubenswrapper[4784]: I0106 08:20:09.697368 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" podUID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" containerName="controller-manager" containerID="cri-o://980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8" gracePeriod=30 Jan 06 08:20:09 crc kubenswrapper[4784]: I0106 08:20:09.798817 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"] Jan 06 08:20:09 crc kubenswrapper[4784]: I0106 08:20:09.799122 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" podUID="58b87341-53f1-4b38-807b-964e45e69986" containerName="route-controller-manager" containerID="cri-o://1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d" gracePeriod=30 Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.098945 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.198479 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235134 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert\") pod \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235441 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2vcs\" (UniqueName: \"kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs\") pod \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235626 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config\") pod \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235667 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert\") pod \"58b87341-53f1-4b38-807b-964e45e69986\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235686 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config\") pod \"58b87341-53f1-4b38-807b-964e45e69986\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235707 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles\") pod \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235736 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgt7f\" (UniqueName: \"kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f\") pod \"58b87341-53f1-4b38-807b-964e45e69986\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235764 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca\") pod \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\" (UID: \"ff8d015b-29e9-47bf-8735-eec268cb7d3a\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.235784 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca\") pod \"58b87341-53f1-4b38-807b-964e45e69986\" (UID: \"58b87341-53f1-4b38-807b-964e45e69986\") " Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.236359 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod 
"ff8d015b-29e9-47bf-8735-eec268cb7d3a" (UID: "ff8d015b-29e9-47bf-8735-eec268cb7d3a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.236391 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca" (OuterVolumeSpecName: "client-ca") pod "58b87341-53f1-4b38-807b-964e45e69986" (UID: "58b87341-53f1-4b38-807b-964e45e69986"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.236646 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config" (OuterVolumeSpecName: "config") pod "58b87341-53f1-4b38-807b-964e45e69986" (UID: "58b87341-53f1-4b38-807b-964e45e69986"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.236681 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config" (OuterVolumeSpecName: "config") pod "ff8d015b-29e9-47bf-8735-eec268cb7d3a" (UID: "ff8d015b-29e9-47bf-8735-eec268cb7d3a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.236815 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca" (OuterVolumeSpecName: "client-ca") pod "ff8d015b-29e9-47bf-8735-eec268cb7d3a" (UID: "ff8d015b-29e9-47bf-8735-eec268cb7d3a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.243106 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs" (OuterVolumeSpecName: "kube-api-access-p2vcs") pod "ff8d015b-29e9-47bf-8735-eec268cb7d3a" (UID: "ff8d015b-29e9-47bf-8735-eec268cb7d3a"). InnerVolumeSpecName "kube-api-access-p2vcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.243206 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "58b87341-53f1-4b38-807b-964e45e69986" (UID: "58b87341-53f1-4b38-807b-964e45e69986"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.244844 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f" (OuterVolumeSpecName: "kube-api-access-vgt7f") pod "58b87341-53f1-4b38-807b-964e45e69986" (UID: "58b87341-53f1-4b38-807b-964e45e69986"). InnerVolumeSpecName "kube-api-access-vgt7f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.246428 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ff8d015b-29e9-47bf-8735-eec268cb7d3a" (UID: "ff8d015b-29e9-47bf-8735-eec268cb7d3a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336691 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336725 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336738 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8d015b-29e9-47bf-8735-eec268cb7d3a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336750 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2vcs\" (UniqueName: \"kubernetes.io/projected/ff8d015b-29e9-47bf-8735-eec268cb7d3a-kube-api-access-p2vcs\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336764 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336777 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58b87341-53f1-4b38-807b-964e45e69986-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336788 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b87341-53f1-4b38-807b-964e45e69986-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336800 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ff8d015b-29e9-47bf-8735-eec268cb7d3a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.336812 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgt7f\" (UniqueName: \"kubernetes.io/projected/58b87341-53f1-4b38-807b-964e45e69986-kube-api-access-vgt7f\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.528819 4784 generic.go:334] "Generic (PLEG): container finished" podID="58b87341-53f1-4b38-807b-964e45e69986" containerID="1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d" exitCode=0 Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.528891 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.528940 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" event={"ID":"58b87341-53f1-4b38-807b-964e45e69986","Type":"ContainerDied","Data":"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d"} Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.529045 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv" event={"ID":"58b87341-53f1-4b38-807b-964e45e69986","Type":"ContainerDied","Data":"134569ceee786eef2819cab1a02ab05e2acb623f785e81f0fb757ee2e46b9f91"} Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.529116 4784 scope.go:117] "RemoveContainer" containerID="1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.530835 4784 generic.go:334] "Generic (PLEG): container finished" podID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" containerID="980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8" exitCode=0 Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.530868 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" event={"ID":"ff8d015b-29e9-47bf-8735-eec268cb7d3a","Type":"ContainerDied","Data":"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8"} Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.530891 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" event={"ID":"ff8d015b-29e9-47bf-8735-eec268cb7d3a","Type":"ContainerDied","Data":"e502ce19398b8beeff7a6786af782f11808a235033e00353fa6a1cc69bf6138a"} Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.530970 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9tb2r" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.549564 4784 scope.go:117] "RemoveContainer" containerID="1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d" Jan 06 08:20:10 crc kubenswrapper[4784]: E0106 08:20:10.550463 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d\": container with ID starting with 1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d not found: ID does not exist" containerID="1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.550500 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d"} err="failed to get container status \"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d\": rpc error: code = NotFound desc = could not find container \"1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d\": container with ID starting with 1758ac25d01feee91c246617a0972255f28a1660cfbf677628870e9f3345643d not found: ID does not exist" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.550523 4784 scope.go:117] "RemoveContainer" containerID="980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.557433 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"] Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.564505 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2tmmv"] Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.578838 4784 scope.go:117] "RemoveContainer" containerID="980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.580151 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"] Jan 06 08:20:10 crc kubenswrapper[4784]: E0106 08:20:10.581009 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8\": container with ID starting with 980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8 not found: ID does not exist" containerID="980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.581074 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8"} err="failed to get container status \"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8\": rpc error: code = NotFound desc = could not find container \"980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8\": container with ID starting with 980bba8499224aebf92c938999d05fa566aed8545fce7a6f83e4ce4d764459a8 not found: ID does not exist" Jan 06 08:20:10 crc kubenswrapper[4784]: I0106 08:20:10.583990 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9tb2r"] Jan 06 08:20:11 crc 
kubenswrapper[4784]: I0106 08:20:11.566680 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:11 crc kubenswrapper[4784]: E0106 08:20:11.567193 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b87341-53f1-4b38-807b-964e45e69986" containerName="route-controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.567226 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b87341-53f1-4b38-807b-964e45e69986" containerName="route-controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: E0106 08:20:11.567281 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" containerName="controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.567295 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" containerName="controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.567470 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" containerName="controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.567502 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="58b87341-53f1-4b38-807b-964e45e69986" containerName="route-controller-manager" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.568163 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.570369 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.570638 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.570783 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.573063 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.574752 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.575020 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.594921 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.595566 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.595773 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.595993 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.596985 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.597308 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.598910 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.601796 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.602747 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.612103 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.618769 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.758378 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfjcg\" (UniqueName: \"kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.758984 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759102 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 
08:20:11.759172 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759204 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759236 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759254 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l57r\" (UniqueName: \"kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759300 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.759323 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860311 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfjcg\" (UniqueName: \"kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860398 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860468 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860522 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860575 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860608 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l57r\" (UniqueName: \"kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860687 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.860728 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.861846 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.862140 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config\") pod 
\"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.862528 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.862823 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.865868 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.871989 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.876334 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.878870 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfjcg\" (UniqueName: \"kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg\") pod \"controller-manager-bd8d95bb7-gd7j2\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.882368 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l57r\" (UniqueName: \"kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r\") pod \"route-controller-manager-ff9bfc955-drsn9\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.891119 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:11 crc kubenswrapper[4784]: I0106 08:20:11.900294 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.168091 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.322247 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58b87341-53f1-4b38-807b-964e45e69986" path="/var/lib/kubelet/pods/58b87341-53f1-4b38-807b-964e45e69986/volumes" Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.323923 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff8d015b-29e9-47bf-8735-eec268cb7d3a" path="/var/lib/kubelet/pods/ff8d015b-29e9-47bf-8735-eec268cb7d3a/volumes" Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.434907 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:12 crc kubenswrapper[4784]: W0106 08:20:12.440760 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bff02d4_7afa_4ea7_81b2_e7ca5723835f.slice/crio-cd4b1859f3c2833c4148174b1a362d992606b723a0e37baf9cf5f7ce99525592 WatchSource:0}: Error finding container cd4b1859f3c2833c4148174b1a362d992606b723a0e37baf9cf5f7ce99525592: Status 404 returned error can't find the container with id cd4b1859f3c2833c4148174b1a362d992606b723a0e37baf9cf5f7ce99525592 Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.549624 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" event={"ID":"9bff02d4-7afa-4ea7-81b2-e7ca5723835f","Type":"ContainerStarted","Data":"cd4b1859f3c2833c4148174b1a362d992606b723a0e37baf9cf5f7ce99525592"} Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.551212 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" event={"ID":"ce594f68-aefa-46e6-b8df-ae03438daca5","Type":"ContainerStarted","Data":"45f3160143ba5d09845958e276c2bb175fef9bfc5c5e77c7e2ef22c5ec4a5602"} Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.551242 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" event={"ID":"ce594f68-aefa-46e6-b8df-ae03438daca5","Type":"ContainerStarted","Data":"f9ebc94cf6a8164b2b0d05a5a5afe6a9c5c435c8d03a016f6239afbfe9b123aa"} Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.552469 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.584854 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" podStartSLOduration=2.584825961 podStartE2EDuration="2.584825961s" podCreationTimestamp="2026-01-06 08:20:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:20:12.582426734 +0000 UTC m=+314.628599571" watchObservedRunningTime="2026-01-06 08:20:12.584825961 +0000 UTC m=+314.630998798" Jan 06 08:20:12 crc kubenswrapper[4784]: I0106 08:20:12.588693 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:13 crc kubenswrapper[4784]: I0106 08:20:13.559724 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" event={"ID":"9bff02d4-7afa-4ea7-81b2-e7ca5723835f","Type":"ContainerStarted","Data":"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47"} Jan 06 08:20:13 crc kubenswrapper[4784]: I0106 08:20:13.561772 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:13 crc kubenswrapper[4784]: I0106 08:20:13.565854 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:13 crc kubenswrapper[4784]: I0106 08:20:13.576276 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" podStartSLOduration=3.576259215 podStartE2EDuration="3.576259215s" podCreationTimestamp="2026-01-06 08:20:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:20:13.575379566 +0000 UTC m=+315.621552403" watchObservedRunningTime="2026-01-06 08:20:13.576259215 +0000 UTC m=+315.622432062" Jan 06 08:20:29 crc kubenswrapper[4784]: I0106 08:20:29.636284 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:29 crc kubenswrapper[4784]: I0106 08:20:29.637189 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" podUID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" containerName="route-controller-manager" containerID="cri-o://74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47" gracePeriod=30 Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.125157 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.274076 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca\") pod \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.274209 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert\") pod \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.274251 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l57r\" (UniqueName: \"kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r\") pod \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.274302 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config\") pod \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\" (UID: \"9bff02d4-7afa-4ea7-81b2-e7ca5723835f\") " Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.275482 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca" (OuterVolumeSpecName: "client-ca") pod "9bff02d4-7afa-4ea7-81b2-e7ca5723835f" (UID: "9bff02d4-7afa-4ea7-81b2-e7ca5723835f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.275666 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config" (OuterVolumeSpecName: "config") pod "9bff02d4-7afa-4ea7-81b2-e7ca5723835f" (UID: "9bff02d4-7afa-4ea7-81b2-e7ca5723835f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.282250 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r" (OuterVolumeSpecName: "kube-api-access-4l57r") pod "9bff02d4-7afa-4ea7-81b2-e7ca5723835f" (UID: "9bff02d4-7afa-4ea7-81b2-e7ca5723835f"). InnerVolumeSpecName "kube-api-access-4l57r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.286083 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9bff02d4-7afa-4ea7-81b2-e7ca5723835f" (UID: "9bff02d4-7afa-4ea7-81b2-e7ca5723835f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.376017 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.376058 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.376072 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l57r\" (UniqueName: \"kubernetes.io/projected/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-kube-api-access-4l57r\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.376092 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bff02d4-7afa-4ea7-81b2-e7ca5723835f-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.677690 4784 generic.go:334] "Generic (PLEG): container finished" podID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" containerID="74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47" exitCode=0 Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.677754 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" event={"ID":"9bff02d4-7afa-4ea7-81b2-e7ca5723835f","Type":"ContainerDied","Data":"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47"} Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.677791 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.677821 4784 scope.go:117] "RemoveContainer" containerID="74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.677804 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9" event={"ID":"9bff02d4-7afa-4ea7-81b2-e7ca5723835f","Type":"ContainerDied","Data":"cd4b1859f3c2833c4148174b1a362d992606b723a0e37baf9cf5f7ce99525592"} Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.710076 4784 scope.go:117] "RemoveContainer" containerID="74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.710827 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:30 crc kubenswrapper[4784]: E0106 08:20:30.711079 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47\": container with ID starting with 74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47 not found: ID does not exist" containerID="74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.711182 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47"} err="failed to get container status \"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47\": rpc error: code = NotFound desc = could not find container \"74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47\": container with ID starting with 74a7589c01a617303f14feca3d9895ffb96ccde9076a050a5becf5fbcd4e1b47 not found: ID does not exist" Jan 06 08:20:30 crc kubenswrapper[4784]: I0106 08:20:30.717629 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff9bfc955-drsn9"] Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.579382 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p"] Jan 06 08:20:31 crc kubenswrapper[4784]: E0106 08:20:31.580083 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" containerName="route-controller-manager" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.580106 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" containerName="route-controller-manager" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.580279 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" containerName="route-controller-manager" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.580878 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.583334 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.584767 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.584922 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.586991 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.587562 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.589901 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.592445 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-config\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.592493 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmrfm\" (UniqueName: \"kubernetes.io/projected/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-kube-api-access-cmrfm\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.592571 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-serving-cert\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.592608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-client-ca\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.602628 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p"] Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.693655 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-serving-cert\") pod 
\"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.695231 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-client-ca\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.695585 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-config\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.695789 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmrfm\" (UniqueName: \"kubernetes.io/projected/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-kube-api-access-cmrfm\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.696985 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-client-ca\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.697517 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-config\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.707535 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-serving-cert\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.729029 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmrfm\" (UniqueName: \"kubernetes.io/projected/3705e1cb-c3c9-4a33-ac37-d99174bc66f7-kube-api-access-cmrfm\") pod \"route-controller-manager-65fbb66456-mmk7p\" (UID: \"3705e1cb-c3c9-4a33-ac37-d99174bc66f7\") " pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:31 crc kubenswrapper[4784]: I0106 08:20:31.902670 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.331314 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bff02d4-7afa-4ea7-81b2-e7ca5723835f" path="/var/lib/kubelet/pods/9bff02d4-7afa-4ea7-81b2-e7ca5723835f/volumes" Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.408510 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p"] Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.696509 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" event={"ID":"3705e1cb-c3c9-4a33-ac37-d99174bc66f7","Type":"ContainerStarted","Data":"0fcb9e784e17d9cacdc11dd6ae3dca9ed37e729ecff53c1693a0836df3478930"} Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.697083 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.697116 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" event={"ID":"3705e1cb-c3c9-4a33-ac37-d99174bc66f7","Type":"ContainerStarted","Data":"842aeed3480f3fd33b3bc8f5906246d28a4ac51ae6ce5aa09f07ba0c80929292"} Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.700328 4784 patch_prober.go:28] interesting pod/route-controller-manager-65fbb66456-mmk7p container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.700472 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" podUID="3705e1cb-c3c9-4a33-ac37-d99174bc66f7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Jan 06 08:20:32 crc kubenswrapper[4784]: I0106 08:20:32.733047 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" podStartSLOduration=3.733017629 podStartE2EDuration="3.733017629s" podCreationTimestamp="2026-01-06 08:20:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:20:32.725696859 +0000 UTC m=+334.771869726" watchObservedRunningTime="2026-01-06 08:20:32.733017629 +0000 UTC m=+334.779190496" Jan 06 08:20:33 crc kubenswrapper[4784]: I0106 08:20:33.708456 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-65fbb66456-mmk7p" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.741784 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2xtmv"] Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.743459 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.766039 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2xtmv"] Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835221 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835319 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-trusted-ca\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835355 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9a988ba3-a694-4d2d-af11-cc5fb16b8883-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835402 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-certificates\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835441 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-tls\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835469 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9a988ba3-a694-4d2d-af11-cc5fb16b8883-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835499 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-bound-sa-token\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.835562 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc9bg\" (UniqueName: 
\"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-kube-api-access-nc9bg\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.865804 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937280 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9a988ba3-a694-4d2d-af11-cc5fb16b8883-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937386 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-certificates\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937444 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-tls\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937484 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9a988ba3-a694-4d2d-af11-cc5fb16b8883-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937528 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-bound-sa-token\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937631 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc9bg\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-kube-api-access-nc9bg\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.937711 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-trusted-ca\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.939474 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9a988ba3-a694-4d2d-af11-cc5fb16b8883-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.939493 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-certificates\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.941605 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9a988ba3-a694-4d2d-af11-cc5fb16b8883-trusted-ca\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.947316 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9a988ba3-a694-4d2d-af11-cc5fb16b8883-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.947596 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-registry-tls\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.958503 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-bound-sa-token\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:47 crc kubenswrapper[4784]: I0106 08:20:47.966698 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc9bg\" (UniqueName: \"kubernetes.io/projected/9a988ba3-a694-4d2d-af11-cc5fb16b8883-kube-api-access-nc9bg\") pod \"image-registry-66df7c8f76-2xtmv\" (UID: \"9a988ba3-a694-4d2d-af11-cc5fb16b8883\") " pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.098692 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.586902 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2xtmv"] Jan 06 08:20:48 crc kubenswrapper[4784]: W0106 08:20:48.590128 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a988ba3_a694_4d2d_af11_cc5fb16b8883.slice/crio-f7380f7bcd7afaf3c88396ba057a4f0560510fb6e54b34316abda7eb1ad5bbdd WatchSource:0}: Error finding container f7380f7bcd7afaf3c88396ba057a4f0560510fb6e54b34316abda7eb1ad5bbdd: Status 404 returned error can't find the container with id f7380f7bcd7afaf3c88396ba057a4f0560510fb6e54b34316abda7eb1ad5bbdd Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.812835 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" event={"ID":"9a988ba3-a694-4d2d-af11-cc5fb16b8883","Type":"ContainerStarted","Data":"acde24d2339805d1d8a2c6ce0761105742de74e268060f26d24934c0a9824362"} Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.814052 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.814266 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" event={"ID":"9a988ba3-a694-4d2d-af11-cc5fb16b8883","Type":"ContainerStarted","Data":"f7380f7bcd7afaf3c88396ba057a4f0560510fb6e54b34316abda7eb1ad5bbdd"} Jan 06 08:20:48 crc kubenswrapper[4784]: I0106 08:20:48.881804 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" podStartSLOduration=1.881776839 podStartE2EDuration="1.881776839s" podCreationTimestamp="2026-01-06 08:20:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:20:48.879513513 +0000 UTC m=+350.925686360" watchObservedRunningTime="2026-01-06 08:20:48.881776839 +0000 UTC m=+350.927949716" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.187419 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-282gk"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.188843 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.191624 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.201939 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-282gk"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.285097 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-catalog-content\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.285216 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjk96\" (UniqueName: \"kubernetes.io/projected/3467c4e4-9bca-403c-b618-cd6db316a863-kube-api-access-wjk96\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.285310 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-utilities\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.386838 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjk96\" (UniqueName: \"kubernetes.io/projected/3467c4e4-9bca-403c-b618-cd6db316a863-kube-api-access-wjk96\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.387474 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-utilities\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.387909 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-catalog-content\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.388769 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-utilities\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.388876 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3467c4e4-9bca-403c-b618-cd6db316a863-catalog-content\") pod \"redhat-marketplace-282gk\" (UID: 
\"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.403207 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-btpfr"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.405770 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.410036 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.418374 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-btpfr"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.424521 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjk96\" (UniqueName: \"kubernetes.io/projected/3467c4e4-9bca-403c-b618-cd6db316a863-kube-api-access-wjk96\") pod \"redhat-marketplace-282gk\" (UID: \"3467c4e4-9bca-403c-b618-cd6db316a863\") " pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.515002 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.591982 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v7bt\" (UniqueName: \"kubernetes.io/projected/459288c9-529b-4c96-8547-522e2e07cbb9-kube-api-access-6v7bt\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.592183 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-catalog-content\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.592273 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-utilities\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.643662 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.646988 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" podUID="ce594f68-aefa-46e6-b8df-ae03438daca5" containerName="controller-manager" containerID="cri-o://45f3160143ba5d09845958e276c2bb175fef9bfc5c5e77c7e2ef22c5ec4a5602" gracePeriod=30 Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.693092 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-catalog-content\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " 
pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.693128 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-utilities\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.693192 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v7bt\" (UniqueName: \"kubernetes.io/projected/459288c9-529b-4c96-8547-522e2e07cbb9-kube-api-access-6v7bt\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.693812 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-catalog-content\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.693969 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/459288c9-529b-4c96-8547-522e2e07cbb9-utilities\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.729382 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v7bt\" (UniqueName: \"kubernetes.io/projected/459288c9-529b-4c96-8547-522e2e07cbb9-kube-api-access-6v7bt\") pod \"redhat-operators-btpfr\" (UID: \"459288c9-529b-4c96-8547-522e2e07cbb9\") " pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.758855 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.809792 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-282gk"] Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.818324 4784 generic.go:334] "Generic (PLEG): container finished" podID="ce594f68-aefa-46e6-b8df-ae03438daca5" containerID="45f3160143ba5d09845958e276c2bb175fef9bfc5c5e77c7e2ef22c5ec4a5602" exitCode=0 Jan 06 08:20:49 crc kubenswrapper[4784]: I0106 08:20:49.818959 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" event={"ID":"ce594f68-aefa-46e6-b8df-ae03438daca5","Type":"ContainerDied","Data":"45f3160143ba5d09845958e276c2bb175fef9bfc5c5e77c7e2ef22c5ec4a5602"} Jan 06 08:20:49 crc kubenswrapper[4784]: W0106 08:20:49.824566 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3467c4e4_9bca_403c_b618_cd6db316a863.slice/crio-62ac06ebae0e53dc58e1f3b1a1d68f760afc837849659e953b463f7790f1c739 WatchSource:0}: Error finding container 62ac06ebae0e53dc58e1f3b1a1d68f760afc837849659e953b463f7790f1c739: Status 404 returned error can't find the container with id 62ac06ebae0e53dc58e1f3b1a1d68f760afc837849659e953b463f7790f1c739 Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.010700 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-btpfr"] Jan 06 08:20:50 crc kubenswrapper[4784]: W0106 08:20:50.032687 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod459288c9_529b_4c96_8547_522e2e07cbb9.slice/crio-5da95157a6fd1a5a92d71a9ba818c4f980e5af09c037d90d31fa80d4ef8621a3 WatchSource:0}: Error finding container 5da95157a6fd1a5a92d71a9ba818c4f980e5af09c037d90d31fa80d4ef8621a3: Status 404 returned error can't find the container with id 5da95157a6fd1a5a92d71a9ba818c4f980e5af09c037d90d31fa80d4ef8621a3 Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.163628 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.301404 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles\") pod \"ce594f68-aefa-46e6-b8df-ae03438daca5\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.301475 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca\") pod \"ce594f68-aefa-46e6-b8df-ae03438daca5\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.301586 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config\") pod \"ce594f68-aefa-46e6-b8df-ae03438daca5\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.301629 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfjcg\" (UniqueName: \"kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg\") pod \"ce594f68-aefa-46e6-b8df-ae03438daca5\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.301662 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert\") pod \"ce594f68-aefa-46e6-b8df-ae03438daca5\" (UID: \"ce594f68-aefa-46e6-b8df-ae03438daca5\") " Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.302943 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ce594f68-aefa-46e6-b8df-ae03438daca5" (UID: "ce594f68-aefa-46e6-b8df-ae03438daca5"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.303017 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca" (OuterVolumeSpecName: "client-ca") pod "ce594f68-aefa-46e6-b8df-ae03438daca5" (UID: "ce594f68-aefa-46e6-b8df-ae03438daca5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.303054 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config" (OuterVolumeSpecName: "config") pod "ce594f68-aefa-46e6-b8df-ae03438daca5" (UID: "ce594f68-aefa-46e6-b8df-ae03438daca5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.307499 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg" (OuterVolumeSpecName: "kube-api-access-dfjcg") pod "ce594f68-aefa-46e6-b8df-ae03438daca5" (UID: "ce594f68-aefa-46e6-b8df-ae03438daca5"). InnerVolumeSpecName "kube-api-access-dfjcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.307536 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ce594f68-aefa-46e6-b8df-ae03438daca5" (UID: "ce594f68-aefa-46e6-b8df-ae03438daca5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.404215 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.404291 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfjcg\" (UniqueName: \"kubernetes.io/projected/ce594f68-aefa-46e6-b8df-ae03438daca5-kube-api-access-dfjcg\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.404324 4784 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce594f68-aefa-46e6-b8df-ae03438daca5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.404355 4784 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.404382 4784 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce594f68-aefa-46e6-b8df-ae03438daca5-client-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.827320 4784 generic.go:334] "Generic (PLEG): container finished" podID="459288c9-529b-4c96-8547-522e2e07cbb9" containerID="3d87727fee49c5ceda8d2f7c0b693036ac1ccfafde8ba205acce90fb4741df49" exitCode=0 Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.827401 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btpfr" event={"ID":"459288c9-529b-4c96-8547-522e2e07cbb9","Type":"ContainerDied","Data":"3d87727fee49c5ceda8d2f7c0b693036ac1ccfafde8ba205acce90fb4741df49"} Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.828884 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btpfr" event={"ID":"459288c9-529b-4c96-8547-522e2e07cbb9","Type":"ContainerStarted","Data":"5da95157a6fd1a5a92d71a9ba818c4f980e5af09c037d90d31fa80d4ef8621a3"} Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.831697 4784 generic.go:334] "Generic (PLEG): container finished" podID="3467c4e4-9bca-403c-b618-cd6db316a863" containerID="057ec2ead56a7cbc10c4a176cea7d229c3c38903e1037b3588a56a33a83a4448" exitCode=0 Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.831826 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-282gk" event={"ID":"3467c4e4-9bca-403c-b618-cd6db316a863","Type":"ContainerDied","Data":"057ec2ead56a7cbc10c4a176cea7d229c3c38903e1037b3588a56a33a83a4448"} Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.831874 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-282gk" 
event={"ID":"3467c4e4-9bca-403c-b618-cd6db316a863","Type":"ContainerStarted","Data":"62ac06ebae0e53dc58e1f3b1a1d68f760afc837849659e953b463f7790f1c739"} Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.836372 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" event={"ID":"ce594f68-aefa-46e6-b8df-ae03438daca5","Type":"ContainerDied","Data":"f9ebc94cf6a8164b2b0d05a5a5afe6a9c5c435c8d03a016f6239afbfe9b123aa"} Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.836414 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.836460 4784 scope.go:117] "RemoveContainer" containerID="45f3160143ba5d09845958e276c2bb175fef9bfc5c5e77c7e2ef22c5ec4a5602" Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.876882 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:50 crc kubenswrapper[4784]: I0106 08:20:50.881458 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-bd8d95bb7-gd7j2"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.594634 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4qddw"] Jan 06 08:20:51 crc kubenswrapper[4784]: E0106 08:20:51.595665 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce594f68-aefa-46e6-b8df-ae03438daca5" containerName="controller-manager" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.595708 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce594f68-aefa-46e6-b8df-ae03438daca5" containerName="controller-manager" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.595960 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce594f68-aefa-46e6-b8df-ae03438daca5" containerName="controller-manager" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.597583 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.607269 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.611996 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-84dc98465d-wffx2"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.613028 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.616502 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.616714 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.621485 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.621511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.621715 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.621759 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.625156 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.630862 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84dc98465d-wffx2"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.636612 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4qddw"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.723931 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-config\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724028 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-catalog-content\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724074 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zppqf\" (UniqueName: \"kubernetes.io/projected/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-kube-api-access-zppqf\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724098 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-proxy-ca-bundles\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724121 4784 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-client-ca\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724181 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knvz6\" (UniqueName: \"kubernetes.io/projected/9125c3e3-6ed0-43d4-927b-a78c0359f640-kube-api-access-knvz6\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724304 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9125c3e3-6ed0-43d4-927b-a78c0359f640-serving-cert\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.724344 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-utilities\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.788784 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c9kbb"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.792533 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.794218 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c9kbb"] Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.794703 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825760 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9125c3e3-6ed0-43d4-927b-a78c0359f640-serving-cert\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825825 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-utilities\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825855 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-config\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825891 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-catalog-content\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825932 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zppqf\" (UniqueName: \"kubernetes.io/projected/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-kube-api-access-zppqf\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-proxy-ca-bundles\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.825978 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-client-ca\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.826006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knvz6\" (UniqueName: 
\"kubernetes.io/projected/9125c3e3-6ed0-43d4-927b-a78c0359f640-kube-api-access-knvz6\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.826363 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-utilities\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.827079 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-client-ca\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.827296 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-proxy-ca-bundles\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.827330 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9125c3e3-6ed0-43d4-927b-a78c0359f640-config\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.827766 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-catalog-content\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.834862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9125c3e3-6ed0-43d4-927b-a78c0359f640-serving-cert\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.856402 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knvz6\" (UniqueName: \"kubernetes.io/projected/9125c3e3-6ed0-43d4-927b-a78c0359f640-kube-api-access-knvz6\") pod \"controller-manager-84dc98465d-wffx2\" (UID: \"9125c3e3-6ed0-43d4-927b-a78c0359f640\") " pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.861176 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btpfr" event={"ID":"459288c9-529b-4c96-8547-522e2e07cbb9","Type":"ContainerStarted","Data":"33e86d0dbb62d7a413d92f2dffa2f95fd6a71dabc12a1eca447152682864710d"} Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.863962 4784 generic.go:334] "Generic (PLEG): container finished" 
podID="3467c4e4-9bca-403c-b618-cd6db316a863" containerID="9d8be726014b3bc0b5a7105eb147e01ae262c3c2cb5ccfffd497a36c6769125d" exitCode=0 Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.864069 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-282gk" event={"ID":"3467c4e4-9bca-403c-b618-cd6db316a863","Type":"ContainerDied","Data":"9d8be726014b3bc0b5a7105eb147e01ae262c3c2cb5ccfffd497a36c6769125d"} Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.875359 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zppqf\" (UniqueName: \"kubernetes.io/projected/bab3a775-7540-4d7a-8ec1-c954a0f0fd08-kube-api-access-zppqf\") pod \"community-operators-4qddw\" (UID: \"bab3a775-7540-4d7a-8ec1-c954a0f0fd08\") " pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.926752 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-utilities\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.926983 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-catalog-content\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.927238 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skhw9\" (UniqueName: \"kubernetes.io/projected/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-kube-api-access-skhw9\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:51 crc kubenswrapper[4784]: I0106 08:20:51.969469 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.005979 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.028995 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-utilities\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.029137 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-catalog-content\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.029220 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skhw9\" (UniqueName: \"kubernetes.io/projected/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-kube-api-access-skhw9\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.030189 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-utilities\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.031399 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-catalog-content\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.050020 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skhw9\" (UniqueName: \"kubernetes.io/projected/52672fc6-6a1b-4b75-aca8-c7208aabcfe8-kube-api-access-skhw9\") pod \"certified-operators-c9kbb\" (UID: \"52672fc6-6a1b-4b75-aca8-c7208aabcfe8\") " pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.204093 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.216200 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4qddw"] Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.249765 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84dc98465d-wffx2"] Jan 06 08:20:52 crc kubenswrapper[4784]: W0106 08:20:52.258590 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9125c3e3_6ed0_43d4_927b_a78c0359f640.slice/crio-96f04e5d47bc07513758d91f5e2c499bed640cc98c82d975f427a3c83d87506d WatchSource:0}: Error finding container 96f04e5d47bc07513758d91f5e2c499bed640cc98c82d975f427a3c83d87506d: Status 404 returned error can't find the container with id 96f04e5d47bc07513758d91f5e2c499bed640cc98c82d975f427a3c83d87506d Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.337214 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce594f68-aefa-46e6-b8df-ae03438daca5" path="/var/lib/kubelet/pods/ce594f68-aefa-46e6-b8df-ae03438daca5/volumes" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.420143 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c9kbb"] Jan 06 08:20:52 crc kubenswrapper[4784]: W0106 08:20:52.445882 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52672fc6_6a1b_4b75_aca8_c7208aabcfe8.slice/crio-48a221803fb2753a3319d3dec1d24be9b8cc7f37052862d8cff0b631f5575a51 WatchSource:0}: Error finding container 48a221803fb2753a3319d3dec1d24be9b8cc7f37052862d8cff0b631f5575a51: Status 404 returned error can't find the container with id 48a221803fb2753a3319d3dec1d24be9b8cc7f37052862d8cff0b631f5575a51 Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.872821 4784 generic.go:334] "Generic (PLEG): container finished" podID="bab3a775-7540-4d7a-8ec1-c954a0f0fd08" containerID="010ba7d1c6278010adf9bdf6b1976e94b316958d5d3b94a40a9caf0b2c1c892b" exitCode=0 Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.872924 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qddw" event={"ID":"bab3a775-7540-4d7a-8ec1-c954a0f0fd08","Type":"ContainerDied","Data":"010ba7d1c6278010adf9bdf6b1976e94b316958d5d3b94a40a9caf0b2c1c892b"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.872978 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qddw" event={"ID":"bab3a775-7540-4d7a-8ec1-c954a0f0fd08","Type":"ContainerStarted","Data":"d70cbc4e339bca7284827803410da0058b2e52bed7d46fd95897709f9d21eba7"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.876718 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-282gk" event={"ID":"3467c4e4-9bca-403c-b618-cd6db316a863","Type":"ContainerStarted","Data":"0a7e0caa2716c875df4a86747e86fa3eed5eb3003cba252a8a195f6764e9427c"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.879754 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" event={"ID":"9125c3e3-6ed0-43d4-927b-a78c0359f640","Type":"ContainerStarted","Data":"cee97e341e89acd6dcd3fd1f10729fcd420482ee2bfd4da397ea234b7ac6b009"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 
08:20:52.879792 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" event={"ID":"9125c3e3-6ed0-43d4-927b-a78c0359f640","Type":"ContainerStarted","Data":"96f04e5d47bc07513758d91f5e2c499bed640cc98c82d975f427a3c83d87506d"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.880141 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.882675 4784 generic.go:334] "Generic (PLEG): container finished" podID="459288c9-529b-4c96-8547-522e2e07cbb9" containerID="33e86d0dbb62d7a413d92f2dffa2f95fd6a71dabc12a1eca447152682864710d" exitCode=0 Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.882750 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btpfr" event={"ID":"459288c9-529b-4c96-8547-522e2e07cbb9","Type":"ContainerDied","Data":"33e86d0dbb62d7a413d92f2dffa2f95fd6a71dabc12a1eca447152682864710d"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.884564 4784 generic.go:334] "Generic (PLEG): container finished" podID="52672fc6-6a1b-4b75-aca8-c7208aabcfe8" containerID="e663bb5053f9b11b42e1da6af5ca2a86979cd54e60e99f9e81506783f94bd793" exitCode=0 Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.884594 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9kbb" event={"ID":"52672fc6-6a1b-4b75-aca8-c7208aabcfe8","Type":"ContainerDied","Data":"e663bb5053f9b11b42e1da6af5ca2a86979cd54e60e99f9e81506783f94bd793"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.884612 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9kbb" event={"ID":"52672fc6-6a1b-4b75-aca8-c7208aabcfe8","Type":"ContainerStarted","Data":"48a221803fb2753a3319d3dec1d24be9b8cc7f37052862d8cff0b631f5575a51"} Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.888188 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.935666 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-282gk" podStartSLOduration=2.444948124 podStartE2EDuration="3.935645441s" podCreationTimestamp="2026-01-06 08:20:49 +0000 UTC" firstStartedPulling="2026-01-06 08:20:50.835176243 +0000 UTC m=+352.881349090" lastFinishedPulling="2026-01-06 08:20:52.32587355 +0000 UTC m=+354.372046407" observedRunningTime="2026-01-06 08:20:52.931060739 +0000 UTC m=+354.977233586" watchObservedRunningTime="2026-01-06 08:20:52.935645441 +0000 UTC m=+354.981818278" Jan 06 08:20:52 crc kubenswrapper[4784]: I0106 08:20:52.961988 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-84dc98465d-wffx2" podStartSLOduration=3.961969526 podStartE2EDuration="3.961969526s" podCreationTimestamp="2026-01-06 08:20:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:20:52.959280209 +0000 UTC m=+355.005453046" watchObservedRunningTime="2026-01-06 08:20:52.961969526 +0000 UTC m=+355.008142363" Jan 06 08:20:53 crc kubenswrapper[4784]: I0106 08:20:53.891989 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-btpfr" event={"ID":"459288c9-529b-4c96-8547-522e2e07cbb9","Type":"ContainerStarted","Data":"42d824e1757a9bfef501c5d855e5614969b7df1b68882a796cd28e54c24b8eeb"} Jan 06 08:20:53 crc kubenswrapper[4784]: I0106 08:20:53.894286 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9kbb" event={"ID":"52672fc6-6a1b-4b75-aca8-c7208aabcfe8","Type":"ContainerStarted","Data":"a2583867814f17617db44bae43cc3962156a6fa186db31300830c1fc814daf73"} Jan 06 08:20:53 crc kubenswrapper[4784]: I0106 08:20:53.896314 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qddw" event={"ID":"bab3a775-7540-4d7a-8ec1-c954a0f0fd08","Type":"ContainerStarted","Data":"124b8711da8b88b3ef60fc97286edd79ec60a3a64d5a046fbd3d2d2fd92d2f9b"} Jan 06 08:20:53 crc kubenswrapper[4784]: I0106 08:20:53.940417 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-btpfr" podStartSLOduration=2.443067989 podStartE2EDuration="4.940398243s" podCreationTimestamp="2026-01-06 08:20:49 +0000 UTC" firstStartedPulling="2026-01-06 08:20:50.829825299 +0000 UTC m=+352.875998176" lastFinishedPulling="2026-01-06 08:20:53.327155573 +0000 UTC m=+355.373328430" observedRunningTime="2026-01-06 08:20:53.915063446 +0000 UTC m=+355.961236283" watchObservedRunningTime="2026-01-06 08:20:53.940398243 +0000 UTC m=+355.986571080" Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.912081 4784 generic.go:334] "Generic (PLEG): container finished" podID="52672fc6-6a1b-4b75-aca8-c7208aabcfe8" containerID="a2583867814f17617db44bae43cc3962156a6fa186db31300830c1fc814daf73" exitCode=0 Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.912419 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9kbb" event={"ID":"52672fc6-6a1b-4b75-aca8-c7208aabcfe8","Type":"ContainerDied","Data":"a2583867814f17617db44bae43cc3962156a6fa186db31300830c1fc814daf73"} Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.915778 4784 generic.go:334] "Generic (PLEG): container finished" podID="bab3a775-7540-4d7a-8ec1-c954a0f0fd08" containerID="124b8711da8b88b3ef60fc97286edd79ec60a3a64d5a046fbd3d2d2fd92d2f9b" exitCode=0 Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.916951 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qddw" event={"ID":"bab3a775-7540-4d7a-8ec1-c954a0f0fd08","Type":"ContainerDied","Data":"124b8711da8b88b3ef60fc97286edd79ec60a3a64d5a046fbd3d2d2fd92d2f9b"} Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.916982 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qddw" event={"ID":"bab3a775-7540-4d7a-8ec1-c954a0f0fd08","Type":"ContainerStarted","Data":"598fd7d7354cdb97344b2a3600ce625c53741cec12fc9c4f141eb73e6d8606eb"} Jan 06 08:20:54 crc kubenswrapper[4784]: I0106 08:20:54.955548 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4qddw" podStartSLOduration=2.213442817 podStartE2EDuration="3.955528114s" podCreationTimestamp="2026-01-06 08:20:51 +0000 UTC" firstStartedPulling="2026-01-06 08:20:52.874955557 +0000 UTC m=+354.921128394" lastFinishedPulling="2026-01-06 08:20:54.617040864 +0000 UTC m=+356.663213691" observedRunningTime="2026-01-06 08:20:54.95396743 +0000 UTC m=+357.000140267" watchObservedRunningTime="2026-01-06 
08:20:54.955528114 +0000 UTC m=+357.001700951" Jan 06 08:20:55 crc kubenswrapper[4784]: I0106 08:20:55.923818 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9kbb" event={"ID":"52672fc6-6a1b-4b75-aca8-c7208aabcfe8","Type":"ContainerStarted","Data":"e7baeba25d4b6dcc1bebbbcd4a065e4dc0d657b911c9967b0a0c47a9fe7e84ec"} Jan 06 08:20:55 crc kubenswrapper[4784]: I0106 08:20:55.948336 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c9kbb" podStartSLOduration=2.432575952 podStartE2EDuration="4.948314034s" podCreationTimestamp="2026-01-06 08:20:51 +0000 UTC" firstStartedPulling="2026-01-06 08:20:52.886756497 +0000 UTC m=+354.932929334" lastFinishedPulling="2026-01-06 08:20:55.402494579 +0000 UTC m=+357.448667416" observedRunningTime="2026-01-06 08:20:55.943516725 +0000 UTC m=+357.989689572" watchObservedRunningTime="2026-01-06 08:20:55.948314034 +0000 UTC m=+357.994486881" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.516485 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.517157 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.568722 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.760149 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.760444 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:20:59 crc kubenswrapper[4784]: I0106 08:20:59.807697 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:21:00 crc kubenswrapper[4784]: I0106 08:21:00.021068 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-282gk" Jan 06 08:21:00 crc kubenswrapper[4784]: I0106 08:21:00.021234 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-btpfr" Jan 06 08:21:01 crc kubenswrapper[4784]: I0106 08:21:01.970324 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:21:01 crc kubenswrapper[4784]: I0106 08:21:01.970492 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:21:02 crc kubenswrapper[4784]: I0106 08:21:02.024050 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:21:02 crc kubenswrapper[4784]: I0106 08:21:02.206859 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:21:02 crc kubenswrapper[4784]: I0106 08:21:02.206920 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:21:02 crc kubenswrapper[4784]: I0106 08:21:02.242451 4784 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:21:03 crc kubenswrapper[4784]: I0106 08:21:03.006907 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c9kbb" Jan 06 08:21:03 crc kubenswrapper[4784]: I0106 08:21:03.007763 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4qddw" Jan 06 08:21:08 crc kubenswrapper[4784]: I0106 08:21:08.104729 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-2xtmv" Jan 06 08:21:08 crc kubenswrapper[4784]: I0106 08:21:08.173006 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"] Jan 06 08:21:14 crc kubenswrapper[4784]: I0106 08:21:14.351352 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:21:14 crc kubenswrapper[4784]: I0106 08:21:14.352006 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.225179 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" podUID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" containerName="registry" containerID="cri-o://427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42" gracePeriod=30 Jan 06 08:21:33 crc kubenswrapper[4784]: E0106 08:21:33.364382 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90ed9938_c2d5_4d7a_9f34_b0e908f9869a.slice/crio-conmon-427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90ed9938_c2d5_4d7a_9f34_b0e908f9869a.slice/crio-427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42.scope\": RecentStats: unable to find data in memory cache]" Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.643384 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657043 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657111 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6qx9\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657260 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657301 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657347 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657433 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657470 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.657521 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates\") pod \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\" (UID: \"90ed9938-c2d5-4d7a-9f34-b0e908f9869a\") "
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.658619 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.659582 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.666751 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.667597 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9" (OuterVolumeSpecName: "kube-api-access-c6qx9") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "kube-api-access-c6qx9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.670305 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.672228 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.679400 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.691790 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "90ed9938-c2d5-4d7a-9f34-b0e908f9869a" (UID: "90ed9938-c2d5-4d7a-9f34-b0e908f9869a"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.758740 4784 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759101 4784 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759184 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6qx9\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-kube-api-access-c6qx9\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759258 4784 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759327 4784 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759387 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:33 crc kubenswrapper[4784]: I0106 08:21:33.759442 4784 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ed9938-c2d5-4d7a-9f34-b0e908f9869a-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.244377 4784 generic.go:334] "Generic (PLEG): container finished" podID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" containerID="427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42" exitCode=0
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.244431 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" event={"ID":"90ed9938-c2d5-4d7a-9f34-b0e908f9869a","Type":"ContainerDied","Data":"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"}
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.244458 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4"
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.244466 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rhgh4" event={"ID":"90ed9938-c2d5-4d7a-9f34-b0e908f9869a","Type":"ContainerDied","Data":"8d5ad624199617e1232c9c848dfca5d46a1101fdeaf85453ef1b91f1883a9e67"}
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.244481 4784 scope.go:117] "RemoveContainer" containerID="427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.278308 4784 scope.go:117] "RemoveContainer" containerID="427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"
Jan 06 08:21:34 crc kubenswrapper[4784]: E0106 08:21:34.279198 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42\": container with ID starting with 427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42 not found: ID does not exist" containerID="427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.279264 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42"} err="failed to get container status \"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42\": rpc error: code = NotFound desc = could not find container \"427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42\": container with ID starting with 427b12d79118861579a9858f352e25ba1a3cfae947a9aecc93ddca26513e2c42 not found: ID does not exist"
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.305071 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"]
Jan 06 08:21:34 crc kubenswrapper[4784]: I0106 08:21:34.326828 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rhgh4"]
Jan 06 08:21:36 crc kubenswrapper[4784]: I0106 08:21:36.323689 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" path="/var/lib/kubelet/pods/90ed9938-c2d5-4d7a-9f34-b0e908f9869a/volumes"
Jan 06 08:21:44 crc kubenswrapper[4784]: I0106 08:21:44.351313 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:21:44 crc kubenswrapper[4784]: I0106 08:21:44.351873 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.350983 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.352113 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.352208 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.353773 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.353903 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7" gracePeriod=600
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.563952 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7" exitCode=0
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.564044 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7"}
Jan 06 08:22:14 crc kubenswrapper[4784]: I0106 08:22:14.564475 4784 scope.go:117] "RemoveContainer" containerID="767abf779f2831bfc50d5c0a7f608783a6aabd2624fc8d50a51717db5fc5fc9f"
Jan 06 08:22:15 crc kubenswrapper[4784]: I0106 08:22:15.576040 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c"}
Jan 06 08:24:14 crc kubenswrapper[4784]: I0106 08:24:14.351494 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:24:14 crc kubenswrapper[4784]: I0106 08:24:14.352707 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:24:44 crc kubenswrapper[4784]: I0106 08:24:44.350671 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:24:44 crc kubenswrapper[4784]: I0106 08:24:44.351602 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.351621 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.352606 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.352725 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.354013 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.354376 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c" gracePeriod=600
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.905420 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c" exitCode=0
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.905486 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c"}
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.905534 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d"}
Jan 06 08:25:14 crc kubenswrapper[4784]: I0106 08:25:14.905587 4784 scope.go:117] "RemoveContainer" containerID="b6b7d740cd5be04e2d170bfab6a98c1e20b0f10be11fbce538c9869051ad40e7"
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.573867 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-blw4q"]
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575088 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-controller" containerID="cri-o://b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575157 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="northd" containerID="cri-o://8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575238 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-acl-logging" containerID="cri-o://1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575227 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-node" containerID="cri-o://f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575214 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575471 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="sbdb" containerID="cri-o://4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.575441 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="nbdb" containerID="cri-o://ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.622234 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" containerID="cri-o://e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" gracePeriod=30
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.975839 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/3.log"
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.978673 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovn-acl-logging/0.log"
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.979412 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovn-controller/0.log"
Jan 06 08:26:20 crc kubenswrapper[4784]: I0106 08:26:20.980048 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q"
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031574 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031659 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031713 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031761 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031807 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031836 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket" (OuterVolumeSpecName: "log-socket") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031894 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031874 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031942 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.031941 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log" (OuterVolumeSpecName: "node-log") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032004 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvbtl\" (UniqueName: \"kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032014 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032055 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032077 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032097 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032133 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032143 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032151 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032170 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032198 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032197 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032225 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032250 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032269 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032341 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032390 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032421 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032453 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides\") pod \"700c7389-9fff-4331-9d37-6af2ff592ac5\" (UID: \"700c7389-9fff-4331-9d37-6af2ff592ac5\") "
Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032843 4784 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-log-socket\") on node \"crc\" DevicePath \"\""
DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032877 4784 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032898 4784 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-node-log\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032917 4784 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032935 4784 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032954 4784 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032970 4784 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032987 4784 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.033006 4784 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032284 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032709 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032744 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032753 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.032777 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.033503 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.033537 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.033591 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash" (OuterVolumeSpecName: "host-slash") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.045487 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dh67j"] Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.046738 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-acl-logging" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.046857 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-acl-logging" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.046948 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="sbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047016 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="sbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.047088 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-node" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047157 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-node" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.047221 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047300 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.047381 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="nbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047835 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="nbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.047922 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048070 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048141 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="northd" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048209 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="northd" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048281 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-ovn-metrics" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048348 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-ovn-metrics" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048477 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" 
containerName="registry" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048558 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" containerName="registry" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048624 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048704 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048780 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kubecfg-setup" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048842 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kubecfg-setup" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.048905 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.048968 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047430 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.047565 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl" (OuterVolumeSpecName: "kube-api-access-lvbtl") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "kube-api-access-lvbtl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049239 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="90ed9938-c2d5-4d7a-9f34-b0e908f9869a" containerName="registry" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049314 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049384 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="nbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049445 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049511 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-acl-logging" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049604 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="sbdb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049675 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovn-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049737 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-node" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.049794 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="northd" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.050384 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="kube-rbac-proxy-ovn-metrics" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.050710 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.050906 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.051245 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.051467 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.051610 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.051755 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.052016 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerName="ovnkube-controller" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 
08:26:21.054012 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.066781 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "700c7389-9fff-4331-9d37-6af2ff592ac5" (UID: "700c7389-9fff-4331-9d37-6af2ff592ac5"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134674 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-bin\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134739 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-etc-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134773 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-slash\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134798 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-node-log\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134826 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs5v8\" (UniqueName: \"kubernetes.io/projected/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-kube-api-access-cs5v8\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134878 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-netns\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134931 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-ovn\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134961 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovn-node-metrics-cert\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.134994 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-config\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135020 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-systemd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135073 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-kubelet\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135102 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-env-overrides\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135121 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-netd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135197 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-var-lib-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135225 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135257 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc 
kubenswrapper[4784]: I0106 08:26:21.135290 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135319 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-script-lib\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135350 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-log-socket\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135395 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-systemd-units\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135466 4784 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135491 4784 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135508 4784 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135523 4784 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-slash\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135536 4784 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135575 4784 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135593 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvbtl\" (UniqueName: \"kubernetes.io/projected/700c7389-9fff-4331-9d37-6af2ff592ac5-kube-api-access-lvbtl\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 
08:26:21.135607 4784 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/700c7389-9fff-4331-9d37-6af2ff592ac5-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135624 4784 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135637 4784 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/700c7389-9fff-4331-9d37-6af2ff592ac5-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.135650 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/700c7389-9fff-4331-9d37-6af2ff592ac5-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237264 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-node-log\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237319 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs5v8\" (UniqueName: \"kubernetes.io/projected/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-kube-api-access-cs5v8\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237362 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-netns\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237403 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-ovn\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237430 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovn-node-metrics-cert\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237460 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-config\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237486 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-systemd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237516 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-kubelet\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237533 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-env-overrides\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237565 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-netd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237583 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-var-lib-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237609 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237628 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237665 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-script-lib\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237682 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-log-socket\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237697 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-systemd-units\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237720 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-bin\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237736 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-etc-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237753 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-slash\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237814 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-slash\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.237851 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-node-log\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238143 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-netns\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238169 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-ovn\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238623 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238675 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238633 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-log-socket\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238750 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-run-ovn-kubernetes\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238758 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-bin\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238828 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-systemd-units\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238869 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-cni-netd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238903 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-etc-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238939 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-run-systemd\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.238974 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-var-lib-openvswitch\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 
08:26:21.239008 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-host-kubelet\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.239309 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-env-overrides\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.239778 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-script-lib\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.239829 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovnkube-config\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.241930 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-ovn-node-metrics-cert\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.259736 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs5v8\" (UniqueName: \"kubernetes.io/projected/33fc2d11-139b-40c6-bc70-52b1e45b8c0c-kube-api-access-cs5v8\") pod \"ovnkube-node-dh67j\" (UID: \"33fc2d11-139b-40c6-bc70-52b1e45b8c0c\") " pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.363426 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/2.log" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.364232 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/1.log" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.364274 4784 generic.go:334] "Generic (PLEG): container finished" podID="85f24cc3-ceca-49ce-b774-32e773e72c1a" containerID="64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6" exitCode=2 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.364333 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerDied","Data":"64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.364367 4784 scope.go:117] "RemoveContainer" containerID="3a19c109a46a0e3207cb4071747c8e1f3148870f185be59d729052b2547792c1" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.365583 4784 scope.go:117] "RemoveContainer" 
containerID="64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.365909 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-l2xdd_openshift-multus(85f24cc3-ceca-49ce-b774-32e773e72c1a)\"" pod="openshift-multus/multus-l2xdd" podUID="85f24cc3-ceca-49ce-b774-32e773e72c1a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.369445 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovnkube-controller/3.log" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.373603 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovn-acl-logging/0.log" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.373889 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374260 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-blw4q_700c7389-9fff-4331-9d37-6af2ff592ac5/ovn-controller/0.log" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374939 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374961 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374969 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374978 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374984 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.374993 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" exitCode=0 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375000 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" exitCode=143 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375008 4784 generic.go:334] "Generic (PLEG): container finished" podID="700c7389-9fff-4331-9d37-6af2ff592ac5" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" exitCode=143 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375025 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375058 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375069 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375079 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375089 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375100 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375110 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375116 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375121 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375126 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375132 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375137 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:26:21 crc 
kubenswrapper[4784]: I0106 08:26:21.375142 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375148 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375153 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375160 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375166 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375173 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375178 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375183 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375191 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375196 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375201 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375206 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375211 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375216 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} Jan 06 08:26:21 crc 
kubenswrapper[4784]: I0106 08:26:21.375224 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375232 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375238 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375244 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375249 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375254 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375259 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375264 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375268 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375273 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375278 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375284 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" event={"ID":"700c7389-9fff-4331-9d37-6af2ff592ac5","Type":"ContainerDied","Data":"29ec8b9e9521c18c57eb011b0ee97b7d548947a004cb0b44587c8211c1cf8a18"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375291 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375297 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375302 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375307 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375312 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375317 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375322 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375327 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375332 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375337 4784 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.375420 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-blw4q" Jan 06 08:26:21 crc kubenswrapper[4784]: W0106 08:26:21.419755 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33fc2d11_139b_40c6_bc70_52b1e45b8c0c.slice/crio-76a0377dd832db948ae974067ad5f77e80d1d01dfd6b3b9791def6d90483b5a9 WatchSource:0}: Error finding container 76a0377dd832db948ae974067ad5f77e80d1d01dfd6b3b9791def6d90483b5a9: Status 404 returned error can't find the container with id 76a0377dd832db948ae974067ad5f77e80d1d01dfd6b3b9791def6d90483b5a9 Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.422513 4784 scope.go:117] "RemoveContainer" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.428425 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-blw4q"] Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.433445 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-blw4q"] Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.444656 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.467177 4784 scope.go:117] "RemoveContainer" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.493065 4784 scope.go:117] "RemoveContainer" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.515419 4784 scope.go:117] "RemoveContainer" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.538169 4784 scope.go:117] "RemoveContainer" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.572507 4784 scope.go:117] "RemoveContainer" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.621862 4784 scope.go:117] "RemoveContainer" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.642166 4784 scope.go:117] "RemoveContainer" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.661492 4784 scope.go:117] "RemoveContainer" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.678243 4784 scope.go:117] "RemoveContainer" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.678760 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": container with ID starting with e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4 not found: ID does not exist" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.678791 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} err="failed to get container status \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": rpc error: code = NotFound desc = could not find container \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": container with ID starting with e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.678812 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.679304 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": container with ID starting with c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df not found: ID does not exist" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.679349 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} err="failed to get container status \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": rpc error: code = NotFound desc = could not find container \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": container with ID starting with c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.679373 4784 scope.go:117] "RemoveContainer" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.679707 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": container with ID starting with 4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5 not found: ID does not exist" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.679730 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} err="failed to get container status \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": rpc error: code = NotFound desc = could not find container \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": container with ID starting with 4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.679747 4784 scope.go:117] "RemoveContainer" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.680124 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": container with ID starting with ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4 not found: ID does not exist" 
containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680156 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} err="failed to get container status \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": rpc error: code = NotFound desc = could not find container \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": container with ID starting with ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680177 4784 scope.go:117] "RemoveContainer" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.680449 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": container with ID starting with 8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491 not found: ID does not exist" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680476 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} err="failed to get container status \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": rpc error: code = NotFound desc = could not find container \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": container with ID starting with 8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680489 4784 scope.go:117] "RemoveContainer" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.680754 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": container with ID starting with 7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda not found: ID does not exist" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680773 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} err="failed to get container status \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": rpc error: code = NotFound desc = could not find container \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": container with ID starting with 7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.680784 4784 scope.go:117] "RemoveContainer" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.681040 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": container with ID starting with f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4 not found: ID does not exist" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681057 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} err="failed to get container status \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": rpc error: code = NotFound desc = could not find container \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": container with ID starting with f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681069 4784 scope.go:117] "RemoveContainer" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.681414 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": container with ID starting with 1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb not found: ID does not exist" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681447 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} err="failed to get container status \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": rpc error: code = NotFound desc = could not find container \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": container with ID starting with 1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681468 4784 scope.go:117] "RemoveContainer" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: E0106 08:26:21.681783 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": container with ID starting with b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a not found: ID does not exist" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681806 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} err="failed to get container status \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": rpc error: code = NotFound desc = could not find container \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": container with ID starting with b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.681819 4784 scope.go:117] "RemoveContainer" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc 
kubenswrapper[4784]: E0106 08:26:21.682153 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": container with ID starting with ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb not found: ID does not exist" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682172 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} err="failed to get container status \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": rpc error: code = NotFound desc = could not find container \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": container with ID starting with ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682183 4784 scope.go:117] "RemoveContainer" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682399 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} err="failed to get container status \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": rpc error: code = NotFound desc = could not find container \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": container with ID starting with e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682428 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682744 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} err="failed to get container status \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": rpc error: code = NotFound desc = could not find container \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": container with ID starting with c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682768 4784 scope.go:117] "RemoveContainer" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682975 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} err="failed to get container status \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": rpc error: code = NotFound desc = could not find container \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": container with ID starting with 4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.682997 4784 scope.go:117] "RemoveContainer" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc 
kubenswrapper[4784]: I0106 08:26:21.683230 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} err="failed to get container status \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": rpc error: code = NotFound desc = could not find container \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": container with ID starting with ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.683253 4784 scope.go:117] "RemoveContainer" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.683501 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} err="failed to get container status \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": rpc error: code = NotFound desc = could not find container \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": container with ID starting with 8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.683571 4784 scope.go:117] "RemoveContainer" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.683915 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} err="failed to get container status \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": rpc error: code = NotFound desc = could not find container \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": container with ID starting with 7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.683940 4784 scope.go:117] "RemoveContainer" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684180 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} err="failed to get container status \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": rpc error: code = NotFound desc = could not find container \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": container with ID starting with f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684212 4784 scope.go:117] "RemoveContainer" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684537 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} err="failed to get container status \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": rpc error: code = NotFound desc = could not find container \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": container with ID 
starting with 1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684572 4784 scope.go:117] "RemoveContainer" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684906 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} err="failed to get container status \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": rpc error: code = NotFound desc = could not find container \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": container with ID starting with b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.684954 4784 scope.go:117] "RemoveContainer" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.685316 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} err="failed to get container status \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": rpc error: code = NotFound desc = could not find container \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": container with ID starting with ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.685334 4784 scope.go:117] "RemoveContainer" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.685933 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} err="failed to get container status \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": rpc error: code = NotFound desc = could not find container \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": container with ID starting with e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.685963 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686204 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} err="failed to get container status \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": rpc error: code = NotFound desc = could not find container \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": container with ID starting with c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686219 4784 scope.go:117] "RemoveContainer" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686486 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} err="failed to get container status \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": rpc error: code = NotFound desc = could not find container \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": container with ID starting with 4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686502 4784 scope.go:117] "RemoveContainer" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686832 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} err="failed to get container status \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": rpc error: code = NotFound desc = could not find container \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": container with ID starting with ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.686862 4784 scope.go:117] "RemoveContainer" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687100 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} err="failed to get container status \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": rpc error: code = NotFound desc = could not find container \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": container with ID starting with 8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687127 4784 scope.go:117] "RemoveContainer" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687431 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} err="failed to get container status \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": rpc error: code = NotFound desc = could not find container \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": container with ID starting with 7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687451 4784 scope.go:117] "RemoveContainer" containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687688 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} err="failed to get container status \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": rpc error: code = NotFound desc = could not find container \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": container with ID starting with f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4 not found: ID does not exist" Jan 
06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687717 4784 scope.go:117] "RemoveContainer" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687928 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} err="failed to get container status \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": rpc error: code = NotFound desc = could not find container \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": container with ID starting with 1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.687949 4784 scope.go:117] "RemoveContainer" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688234 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} err="failed to get container status \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": rpc error: code = NotFound desc = could not find container \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": container with ID starting with b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688261 4784 scope.go:117] "RemoveContainer" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688516 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} err="failed to get container status \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": rpc error: code = NotFound desc = could not find container \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": container with ID starting with ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688536 4784 scope.go:117] "RemoveContainer" containerID="e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688815 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4"} err="failed to get container status \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": rpc error: code = NotFound desc = could not find container \"e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4\": container with ID starting with e4702eba8b28dc88ab3b56e50c6d5b483b6328d925c72135b542064a2a2e13e4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.688841 4784 scope.go:117] "RemoveContainer" containerID="c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689171 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df"} err="failed to get container status 
\"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": rpc error: code = NotFound desc = could not find container \"c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df\": container with ID starting with c3c2a8db2c001830706cce09ea368ded16f0063e9c93438d2db6b193e8f7f5df not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689201 4784 scope.go:117] "RemoveContainer" containerID="4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689478 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5"} err="failed to get container status \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": rpc error: code = NotFound desc = could not find container \"4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5\": container with ID starting with 4838a5c8ed9121c23e89390166ce650414a00e5cf2dd6abfa66bf4e88d5e77e5 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689497 4784 scope.go:117] "RemoveContainer" containerID="ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689807 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4"} err="failed to get container status \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": rpc error: code = NotFound desc = could not find container \"ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4\": container with ID starting with ac7903658a6ad4d22143298e5add50b0608b71ead6e7c690b99479ae82da30a4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.689827 4784 scope.go:117] "RemoveContainer" containerID="8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690119 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491"} err="failed to get container status \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": rpc error: code = NotFound desc = could not find container \"8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491\": container with ID starting with 8c97ea7f09a591d2b063c39a066a7f718164543d2b6916c4cb9c031550ba3491 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690146 4784 scope.go:117] "RemoveContainer" containerID="7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690399 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda"} err="failed to get container status \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": rpc error: code = NotFound desc = could not find container \"7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda\": container with ID starting with 7820c264353cba76d28f15f05da49152fd218daf86f83d9e42f4c91d6ad10dda not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690422 4784 scope.go:117] "RemoveContainer" 
containerID="f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690651 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4"} err="failed to get container status \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": rpc error: code = NotFound desc = could not find container \"f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4\": container with ID starting with f57bf96395a4d7053908f82cc1c9a08c13ae6042748a57913429a9af40c578d4 not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690679 4784 scope.go:117] "RemoveContainer" containerID="1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.690955 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb"} err="failed to get container status \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": rpc error: code = NotFound desc = could not find container \"1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb\": container with ID starting with 1860fefce1f648610957b0c611bc2c5d8234d56ee42981ee85c57504033afbeb not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.691028 4784 scope.go:117] "RemoveContainer" containerID="b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.691304 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a"} err="failed to get container status \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": rpc error: code = NotFound desc = could not find container \"b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a\": container with ID starting with b1cfeca29de783497c383e4d656e3164b78f75de4ecb20312edbc5d8d01b722a not found: ID does not exist" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.691325 4784 scope.go:117] "RemoveContainer" containerID="ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb" Jan 06 08:26:21 crc kubenswrapper[4784]: I0106 08:26:21.691641 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb"} err="failed to get container status \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": rpc error: code = NotFound desc = could not find container \"ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb\": container with ID starting with ff9f1003581eacfad17ea69ed090346fc513af4b991b21a03475f2ca605c40cb not found: ID does not exist" Jan 06 08:26:22 crc kubenswrapper[4784]: I0106 08:26:22.320769 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="700c7389-9fff-4331-9d37-6af2ff592ac5" path="/var/lib/kubelet/pods/700c7389-9fff-4331-9d37-6af2ff592ac5/volumes" Jan 06 08:26:22 crc kubenswrapper[4784]: I0106 08:26:22.393471 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/2.log" Jan 06 08:26:22 crc kubenswrapper[4784]: I0106 08:26:22.398065 4784 generic.go:334] "Generic (PLEG): 
container finished" podID="33fc2d11-139b-40c6-bc70-52b1e45b8c0c" containerID="07416a4845b3f4ba112a568697bb76067012f7ca9de2768d70d1bee5c047929c" exitCode=0 Jan 06 08:26:22 crc kubenswrapper[4784]: I0106 08:26:22.398168 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerDied","Data":"07416a4845b3f4ba112a568697bb76067012f7ca9de2768d70d1bee5c047929c"} Jan 06 08:26:22 crc kubenswrapper[4784]: I0106 08:26:22.398225 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"76a0377dd832db948ae974067ad5f77e80d1d01dfd6b3b9791def6d90483b5a9"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.410927 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"3d9fb76643f2a6b63b534d6c1fc6e05459bf979cc8fac006bbfe66631ac2afe4"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.412198 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"c1523ad809f3d03db2c9142d41ae4a5bce55c88e9af2c87efe65771734e6e924"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.412217 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"25656433b20ad9c39cde5c1ee066ca0c780075b673b37dab1b76806029b67e02"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.412232 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"b748c7758dba6e6b292400f5eae3895835007cd6ba0e7c5029ab636c30d876d5"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.412246 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"90302b4ff397e128416aa68010be7a93e5a5ecfbc260274c20fe891e87a74986"} Jan 06 08:26:23 crc kubenswrapper[4784]: I0106 08:26:23.412260 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"c9311a216f813b6572cec5f38ccbd275dd423723bfce4930d3ba33b5ecac7a6b"} Jan 06 08:26:26 crc kubenswrapper[4784]: I0106 08:26:26.438034 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"01bff7fbfd85c106b0b4e9c870d0091c73a6894756292212164b8729d60adb42"} Jan 06 08:26:28 crc kubenswrapper[4784]: I0106 08:26:28.458690 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" event={"ID":"33fc2d11-139b-40c6-bc70-52b1e45b8c0c","Type":"ContainerStarted","Data":"36a30d2689a9ad189135137a762c68140fe62e48425d2f4976c874772c3aa4a3"} Jan 06 08:26:28 crc kubenswrapper[4784]: I0106 08:26:28.459475 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:28 crc 
kubenswrapper[4784]: I0106 08:26:28.459488 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:28 crc kubenswrapper[4784]: I0106 08:26:28.489793 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:28 crc kubenswrapper[4784]: I0106 08:26:28.510216 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" podStartSLOduration=7.510192609 podStartE2EDuration="7.510192609s" podCreationTimestamp="2026-01-06 08:26:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:26:28.504787861 +0000 UTC m=+690.550960708" watchObservedRunningTime="2026-01-06 08:26:28.510192609 +0000 UTC m=+690.556365446" Jan 06 08:26:29 crc kubenswrapper[4784]: I0106 08:26:29.467070 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:29 crc kubenswrapper[4784]: I0106 08:26:29.550501 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.013520 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-t46db"] Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.014781 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.016990 4784 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-l6flr" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.017322 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.017017 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.018313 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.026286 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-t46db"] Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.129437 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.129487 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.129522 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spl8q\" (UniqueName: \"kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q\") pod \"crc-storage-crc-t46db\" 
(UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.231210 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.231276 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.231305 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spl8q\" (UniqueName: \"kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.231899 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.232686 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.255450 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spl8q\" (UniqueName: \"kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q\") pod \"crc-storage-crc-t46db\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.343064 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.373585 4784 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(1831572f9201b4147f3fb982dbfd436d526708c80e59e620e7c122b528a731ce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.373694 4784 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(1831572f9201b4147f3fb982dbfd436d526708c80e59e620e7c122b528a731ce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.373721 4784 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(1831572f9201b4147f3fb982dbfd436d526708c80e59e620e7c122b528a731ce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.373800 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(1831572f9201b4147f3fb982dbfd436d526708c80e59e620e7c122b528a731ce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-t46db" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.471378 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: I0106 08:26:30.472186 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.508356 4784 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(099ecd781632507cc90bfa81be0c492842649f0d97910be9e2e248f58ec9164d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.508422 4784 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(099ecd781632507cc90bfa81be0c492842649f0d97910be9e2e248f58ec9164d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.508448 4784 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(099ecd781632507cc90bfa81be0c492842649f0d97910be9e2e248f58ec9164d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:30 crc kubenswrapper[4784]: E0106 08:26:30.508496 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(099ecd781632507cc90bfa81be0c492842649f0d97910be9e2e248f58ec9164d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-t46db" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" Jan 06 08:26:32 crc kubenswrapper[4784]: I0106 08:26:32.312742 4784 scope.go:117] "RemoveContainer" containerID="64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6" Jan 06 08:26:32 crc kubenswrapper[4784]: E0106 08:26:32.313457 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-l2xdd_openshift-multus(85f24cc3-ceca-49ce-b774-32e773e72c1a)\"" pod="openshift-multus/multus-l2xdd" podUID="85f24cc3-ceca-49ce-b774-32e773e72c1a" Jan 06 08:26:42 crc kubenswrapper[4784]: I0106 08:26:42.311763 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:42 crc kubenswrapper[4784]: I0106 08:26:42.312316 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:42 crc kubenswrapper[4784]: E0106 08:26:42.349469 4784 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(e5ed064caa74f4abfb4c1b058e6f46a81d98f1e2f6e053919a463ac371e5d91b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 06 08:26:42 crc kubenswrapper[4784]: E0106 08:26:42.349688 4784 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(e5ed064caa74f4abfb4c1b058e6f46a81d98f1e2f6e053919a463ac371e5d91b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:42 crc kubenswrapper[4784]: E0106 08:26:42.349793 4784 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(e5ed064caa74f4abfb4c1b058e6f46a81d98f1e2f6e053919a463ac371e5d91b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:42 crc kubenswrapper[4784]: E0106 08:26:42.349966 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-t46db_crc-storage(4853126c-df1d-4bf9-bbe3-b0c2d47eec21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-t46db_crc-storage_4853126c-df1d-4bf9-bbe3-b0c2d47eec21_0(e5ed064caa74f4abfb4c1b058e6f46a81d98f1e2f6e053919a463ac371e5d91b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-t46db" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" Jan 06 08:26:43 crc kubenswrapper[4784]: I0106 08:26:43.312672 4784 scope.go:117] "RemoveContainer" containerID="64398024d93df89dcd6c9902c58da525acda8a2b395f1b0a0bf4a25d313c68a6" Jan 06 08:26:43 crc kubenswrapper[4784]: I0106 08:26:43.569683 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-l2xdd_85f24cc3-ceca-49ce-b774-32e773e72c1a/kube-multus/2.log" Jan 06 08:26:43 crc kubenswrapper[4784]: I0106 08:26:43.570864 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-l2xdd" event={"ID":"85f24cc3-ceca-49ce-b774-32e773e72c1a","Type":"ContainerStarted","Data":"2e08b10b2ccdc68ea81609d2cd5e767723771576abf8f898cb0523f6ff2bcfdd"} Jan 06 08:26:51 crc kubenswrapper[4784]: I0106 08:26:51.400419 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dh67j" Jan 06 08:26:53 crc kubenswrapper[4784]: I0106 08:26:53.311729 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:53 crc kubenswrapper[4784]: I0106 08:26:53.312243 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:53 crc kubenswrapper[4784]: I0106 08:26:53.579272 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-t46db"] Jan 06 08:26:53 crc kubenswrapper[4784]: I0106 08:26:53.592198 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 08:26:53 crc kubenswrapper[4784]: I0106 08:26:53.654291 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t46db" event={"ID":"4853126c-df1d-4bf9-bbe3-b0c2d47eec21","Type":"ContainerStarted","Data":"824a45331a7ad9c45fc0c3711708f56b725a44c9fe185f0551242fab7df7d183"} Jan 06 08:26:56 crc kubenswrapper[4784]: I0106 08:26:56.675931 4784 generic.go:334] "Generic (PLEG): container finished" podID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" containerID="b2e5358c1ff5f41895d6449d3f32633bdf3c588e3a3bbfe6b5035d485ef6b41b" exitCode=0 Jan 06 08:26:56 crc kubenswrapper[4784]: I0106 08:26:56.676012 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t46db" event={"ID":"4853126c-df1d-4bf9-bbe3-b0c2d47eec21","Type":"ContainerDied","Data":"b2e5358c1ff5f41895d6449d3f32633bdf3c588e3a3bbfe6b5035d485ef6b41b"} Jan 06 08:26:57 crc kubenswrapper[4784]: I0106 08:26:57.963957 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.013095 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spl8q\" (UniqueName: \"kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q\") pod \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.013272 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt\") pod \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.013329 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage\") pod \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\" (UID: \"4853126c-df1d-4bf9-bbe3-b0c2d47eec21\") " Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.013450 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "4853126c-df1d-4bf9-bbe3-b0c2d47eec21" (UID: "4853126c-df1d-4bf9-bbe3-b0c2d47eec21"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.013851 4784 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.019839 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q" (OuterVolumeSpecName: "kube-api-access-spl8q") pod "4853126c-df1d-4bf9-bbe3-b0c2d47eec21" (UID: "4853126c-df1d-4bf9-bbe3-b0c2d47eec21"). InnerVolumeSpecName "kube-api-access-spl8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.029635 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "4853126c-df1d-4bf9-bbe3-b0c2d47eec21" (UID: "4853126c-df1d-4bf9-bbe3-b0c2d47eec21"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.115835 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spl8q\" (UniqueName: \"kubernetes.io/projected/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-kube-api-access-spl8q\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.115890 4784 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4853126c-df1d-4bf9-bbe3-b0c2d47eec21-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.693233 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t46db" event={"ID":"4853126c-df1d-4bf9-bbe3-b0c2d47eec21","Type":"ContainerDied","Data":"824a45331a7ad9c45fc0c3711708f56b725a44c9fe185f0551242fab7df7d183"} Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.693284 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="824a45331a7ad9c45fc0c3711708f56b725a44c9fe185f0551242fab7df7d183" Jan 06 08:26:58 crc kubenswrapper[4784]: I0106 08:26:58.693309 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t46db" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.085395 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx"] Jan 06 08:27:07 crc kubenswrapper[4784]: E0106 08:27:07.086854 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" containerName="storage" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.086882 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" containerName="storage" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.087069 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" containerName="storage" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.088419 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.091252 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.095750 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx"] Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.251510 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhhgg\" (UniqueName: \"kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.251705 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.251748 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.353276 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.353364 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.353441 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhhgg\" (UniqueName: \"kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.354236 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.354248 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.390345 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhhgg\" (UniqueName: \"kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.467062 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:07 crc kubenswrapper[4784]: I0106 08:27:07.740886 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx"] Jan 06 08:27:08 crc kubenswrapper[4784]: I0106 08:27:08.762116 4784 generic.go:334] "Generic (PLEG): container finished" podID="4f1df559-23aa-4dbe-859b-3404209af722" containerID="1346601c9ee6fc0bcf32f4a3d605d4595036e5cfea66c03fd1969f31c99ce2ba" exitCode=0 Jan 06 08:27:08 crc kubenswrapper[4784]: I0106 08:27:08.762421 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" event={"ID":"4f1df559-23aa-4dbe-859b-3404209af722","Type":"ContainerDied","Data":"1346601c9ee6fc0bcf32f4a3d605d4595036e5cfea66c03fd1969f31c99ce2ba"} Jan 06 08:27:08 crc kubenswrapper[4784]: I0106 08:27:08.762828 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" event={"ID":"4f1df559-23aa-4dbe-859b-3404209af722","Type":"ContainerStarted","Data":"3a4d00f07cd5f0dfd77735d8cb808f99ac420e5ea6dbbb72f87740a3b75c36fb"} Jan 06 08:27:10 crc kubenswrapper[4784]: I0106 08:27:10.778824 4784 generic.go:334] "Generic (PLEG): container finished" podID="4f1df559-23aa-4dbe-859b-3404209af722" containerID="f2b2d3ed8007086281c1288eedb0629538b969c1bbc59401237d90b495c65096" exitCode=0 Jan 06 08:27:10 crc kubenswrapper[4784]: I0106 08:27:10.778899 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" event={"ID":"4f1df559-23aa-4dbe-859b-3404209af722","Type":"ContainerDied","Data":"f2b2d3ed8007086281c1288eedb0629538b969c1bbc59401237d90b495c65096"} Jan 06 08:27:11 crc kubenswrapper[4784]: I0106 08:27:11.792740 4784 generic.go:334] "Generic (PLEG): container finished" podID="4f1df559-23aa-4dbe-859b-3404209af722" containerID="989f7bfaec3a8c7be6a643f4d3176a16e5489600feed8f7fcecfb66450b223d3" exitCode=0 Jan 06 08:27:11 crc kubenswrapper[4784]: I0106 
08:27:11.792881 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" event={"ID":"4f1df559-23aa-4dbe-859b-3404209af722","Type":"ContainerDied","Data":"989f7bfaec3a8c7be6a643f4d3176a16e5489600feed8f7fcecfb66450b223d3"} Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.057843 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.240427 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhhgg\" (UniqueName: \"kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg\") pod \"4f1df559-23aa-4dbe-859b-3404209af722\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.240983 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util\") pod \"4f1df559-23aa-4dbe-859b-3404209af722\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.241071 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle\") pod \"4f1df559-23aa-4dbe-859b-3404209af722\" (UID: \"4f1df559-23aa-4dbe-859b-3404209af722\") " Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.252639 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle" (OuterVolumeSpecName: "bundle") pod "4f1df559-23aa-4dbe-859b-3404209af722" (UID: "4f1df559-23aa-4dbe-859b-3404209af722"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.256363 4784 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.270168 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg" (OuterVolumeSpecName: "kube-api-access-lhhgg") pod "4f1df559-23aa-4dbe-859b-3404209af722" (UID: "4f1df559-23aa-4dbe-859b-3404209af722"). InnerVolumeSpecName "kube-api-access-lhhgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.357385 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhhgg\" (UniqueName: \"kubernetes.io/projected/4f1df559-23aa-4dbe-859b-3404209af722-kube-api-access-lhhgg\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.584905 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util" (OuterVolumeSpecName: "util") pod "4f1df559-23aa-4dbe-859b-3404209af722" (UID: "4f1df559-23aa-4dbe-859b-3404209af722"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.661942 4784 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f1df559-23aa-4dbe-859b-3404209af722-util\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.810219 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" event={"ID":"4f1df559-23aa-4dbe-859b-3404209af722","Type":"ContainerDied","Data":"3a4d00f07cd5f0dfd77735d8cb808f99ac420e5ea6dbbb72f87740a3b75c36fb"} Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.810320 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a4d00f07cd5f0dfd77735d8cb808f99ac420e5ea6dbbb72f87740a3b75c36fb" Jan 06 08:27:13 crc kubenswrapper[4784]: I0106 08:27:13.810353 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx" Jan 06 08:27:14 crc kubenswrapper[4784]: I0106 08:27:14.351723 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:27:14 crc kubenswrapper[4784]: I0106 08:27:14.351824 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.747392 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-p54lp"] Jan 06 08:27:15 crc kubenswrapper[4784]: E0106 08:27:15.747854 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="util" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.747874 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="util" Jan 06 08:27:15 crc kubenswrapper[4784]: E0106 08:27:15.747895 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="pull" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.747903 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="pull" Jan 06 08:27:15 crc kubenswrapper[4784]: E0106 08:27:15.747919 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="extract" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.747928 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="extract" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.748076 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f1df559-23aa-4dbe-859b-3404209af722" containerName="extract" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.748785 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.751087 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-fdq74" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.751447 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.751727 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.764075 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-p54lp"] Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.894891 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjlfr\" (UniqueName: \"kubernetes.io/projected/dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa-kube-api-access-gjlfr\") pod \"nmstate-operator-6769fb99d-p54lp\" (UID: \"dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" Jan 06 08:27:15 crc kubenswrapper[4784]: I0106 08:27:15.996377 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjlfr\" (UniqueName: \"kubernetes.io/projected/dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa-kube-api-access-gjlfr\") pod \"nmstate-operator-6769fb99d-p54lp\" (UID: \"dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" Jan 06 08:27:16 crc kubenswrapper[4784]: I0106 08:27:16.032053 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjlfr\" (UniqueName: \"kubernetes.io/projected/dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa-kube-api-access-gjlfr\") pod \"nmstate-operator-6769fb99d-p54lp\" (UID: \"dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" Jan 06 08:27:16 crc kubenswrapper[4784]: I0106 08:27:16.066103 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" Jan 06 08:27:16 crc kubenswrapper[4784]: I0106 08:27:16.319788 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-p54lp"] Jan 06 08:27:16 crc kubenswrapper[4784]: I0106 08:27:16.829268 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" event={"ID":"dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa","Type":"ContainerStarted","Data":"804dd7faf286830fc270d5ea14f43a5f98ac3ca3cce29640cc01e5986d7215b1"} Jan 06 08:27:20 crc kubenswrapper[4784]: I0106 08:27:20.861802 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" event={"ID":"dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa","Type":"ContainerStarted","Data":"5eaa6bd6cdcc778562aaab04e45a86898f172f50f9b89d0b330c03f38fa1cecc"} Jan 06 08:27:20 crc kubenswrapper[4784]: I0106 08:27:20.885311 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-6769fb99d-p54lp" podStartSLOduration=2.530665454 podStartE2EDuration="5.88527906s" podCreationTimestamp="2026-01-06 08:27:15 +0000 UTC" firstStartedPulling="2026-01-06 08:27:16.322887503 +0000 UTC m=+738.369060340" lastFinishedPulling="2026-01-06 08:27:19.677501079 +0000 UTC m=+741.723673946" observedRunningTime="2026-01-06 08:27:20.881425268 +0000 UTC m=+742.927598265" watchObservedRunningTime="2026-01-06 08:27:20.88527906 +0000 UTC m=+742.931451937" Jan 06 08:27:21 crc kubenswrapper[4784]: I0106 08:27:21.953212 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9"] Jan 06 08:27:21 crc kubenswrapper[4784]: I0106 08:27:21.954878 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" Jan 06 08:27:21 crc kubenswrapper[4784]: I0106 08:27:21.960136 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-8mh7f" Jan 06 08:27:21 crc kubenswrapper[4784]: I0106 08:27:21.969289 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.033679 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.034335 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.038733 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.043120 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.046621 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-rh8g6"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.047449 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.091883 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptb2m\" (UniqueName: \"kubernetes.io/projected/f0efdd39-6cba-42a7-9222-9084987431a7-kube-api-access-ptb2m\") pod \"nmstate-metrics-7f7f7578db-tjmr9\" (UID: \"f0efdd39-6cba-42a7-9222-9084987431a7\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.120733 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.121435 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.123598 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.123623 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-n4x6c" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.131933 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.136085 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193198 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-nmstate-lock\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193274 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-dbus-socket\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193308 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193339 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnxpt\" (UniqueName: \"kubernetes.io/projected/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-kube-api-access-qnxpt\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193364 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-ovs-socket\") pod \"nmstate-handler-rh8g6\" (UID: 
\"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193393 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptb2m\" (UniqueName: \"kubernetes.io/projected/f0efdd39-6cba-42a7-9222-9084987431a7-kube-api-access-ptb2m\") pod \"nmstate-metrics-7f7f7578db-tjmr9\" (UID: \"f0efdd39-6cba-42a7-9222-9084987431a7\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.193416 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djqkq\" (UniqueName: \"kubernetes.io/projected/10985894-27f3-4621-ad1a-40ac399731e4-kube-api-access-djqkq\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.221307 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptb2m\" (UniqueName: \"kubernetes.io/projected/f0efdd39-6cba-42a7-9222-9084987431a7-kube-api-access-ptb2m\") pod \"nmstate-metrics-7f7f7578db-tjmr9\" (UID: \"f0efdd39-6cba-42a7-9222-9084987431a7\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.283939 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296563 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-nmstate-lock\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296612 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-dbus-socket\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296668 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znv8t\" (UniqueName: \"kubernetes.io/projected/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-kube-api-access-znv8t\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296692 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " 
pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296732 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnxpt\" (UniqueName: \"kubernetes.io/projected/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-kube-api-access-qnxpt\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296754 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-ovs-socket\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296757 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-nmstate-lock\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296776 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djqkq\" (UniqueName: \"kubernetes.io/projected/10985894-27f3-4621-ad1a-40ac399731e4-kube-api-access-djqkq\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.296876 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.297131 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-ovs-socket\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.297368 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/10985894-27f3-4621-ad1a-40ac399731e4-dbus-socket\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.303055 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.321202 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djqkq\" (UniqueName: \"kubernetes.io/projected/10985894-27f3-4621-ad1a-40ac399731e4-kube-api-access-djqkq\") pod \"nmstate-handler-rh8g6\" (UID: \"10985894-27f3-4621-ad1a-40ac399731e4\") " 
pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.339489 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6486f686c6-9kf5n"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.340333 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.344170 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnxpt\" (UniqueName: \"kubernetes.io/projected/d6b7564f-4ee7-4b23-93b1-8252ba326f2e-kube-api-access-qnxpt\") pod \"nmstate-webhook-f8fb84555-m9sxj\" (UID: \"d6b7564f-4ee7-4b23-93b1-8252ba326f2e\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.346305 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6486f686c6-9kf5n"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.362801 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.377494 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.400175 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znv8t\" (UniqueName: \"kubernetes.io/projected/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-kube-api-access-znv8t\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.400311 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.400351 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.401725 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.406579 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.431837 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-znv8t\" (UniqueName: \"kubernetes.io/projected/e2e0c600-184b-4cc7-8b1f-ed0d37f7d141-kube-api-access-znv8t\") pod \"nmstate-console-plugin-6ff7998486-9zrkp\" (UID: \"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.442252 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.503845 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-trusted-ca-bundle\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.503896 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgp4w\" (UniqueName: \"kubernetes.io/projected/8946de35-8e21-4932-a8e3-260afbdd02a9-kube-api-access-fgp4w\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.503979 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-service-ca\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.504006 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-console-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.504024 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-oauth-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.504052 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-oauth-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.504086 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606183 4784 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-service-ca\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606235 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-console-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606256 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-oauth-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606283 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-oauth-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606308 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606335 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-trusted-ca-bundle\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.606356 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgp4w\" (UniqueName: \"kubernetes.io/projected/8946de35-8e21-4932-a8e3-260afbdd02a9-kube-api-access-fgp4w\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.608672 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-service-ca\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.609736 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-oauth-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.609892 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-console-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.612830 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-oauth-config\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.613881 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8946de35-8e21-4932-a8e3-260afbdd02a9-console-serving-cert\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.616372 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8946de35-8e21-4932-a8e3-260afbdd02a9-trusted-ca-bundle\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.627509 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgp4w\" (UniqueName: \"kubernetes.io/projected/8946de35-8e21-4932-a8e3-260afbdd02a9-kube-api-access-fgp4w\") pod \"console-6486f686c6-9kf5n\" (UID: \"8946de35-8e21-4932-a8e3-260afbdd02a9\") " pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.678558 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.858353 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.877925 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-rh8g6" event={"ID":"10985894-27f3-4621-ad1a-40ac399731e4","Type":"ContainerStarted","Data":"7c3652bdcd79df16664e3000b011a3449bf1c5aa5d69b55daed2b69f8e46faeb"} Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.878840 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" event={"ID":"f0efdd39-6cba-42a7-9222-9084987431a7","Type":"ContainerStarted","Data":"9fbaf5d29f0d919e3e328c3864d40c250170e9a694e1b41126a37aba1d232f9e"} Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.907920 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp"] Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.922075 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj"] Jan 06 08:27:22 crc kubenswrapper[4784]: W0106 08:27:22.925226 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6b7564f_4ee7_4b23_93b1_8252ba326f2e.slice/crio-4b590af34a5e50cf23aed2cfed65dfb8fb562784efc6e3ca8fa182c0ef47cd96 WatchSource:0}: Error finding container 4b590af34a5e50cf23aed2cfed65dfb8fb562784efc6e3ca8fa182c0ef47cd96: Status 404 returned error can't find the container with id 4b590af34a5e50cf23aed2cfed65dfb8fb562784efc6e3ca8fa182c0ef47cd96 Jan 06 08:27:22 crc kubenswrapper[4784]: I0106 08:27:22.927950 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6486f686c6-9kf5n"] Jan 06 08:27:22 crc kubenswrapper[4784]: W0106 08:27:22.935984 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8946de35_8e21_4932_a8e3_260afbdd02a9.slice/crio-38368bcd2499ad78edb6d9938e87707853b39851e2e511b7ff6e4e3dc0a0368f WatchSource:0}: Error finding container 38368bcd2499ad78edb6d9938e87707853b39851e2e511b7ff6e4e3dc0a0368f: Status 404 returned error can't find the container with id 38368bcd2499ad78edb6d9938e87707853b39851e2e511b7ff6e4e3dc0a0368f Jan 06 08:27:23 crc kubenswrapper[4784]: I0106 08:27:23.886944 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6486f686c6-9kf5n" event={"ID":"8946de35-8e21-4932-a8e3-260afbdd02a9","Type":"ContainerStarted","Data":"fef2ca35283f835ed28c7be9391a6328e982135418db17556333a0afd5402f44"} Jan 06 08:27:23 crc kubenswrapper[4784]: I0106 08:27:23.887360 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6486f686c6-9kf5n" event={"ID":"8946de35-8e21-4932-a8e3-260afbdd02a9","Type":"ContainerStarted","Data":"38368bcd2499ad78edb6d9938e87707853b39851e2e511b7ff6e4e3dc0a0368f"} Jan 06 08:27:23 crc kubenswrapper[4784]: I0106 08:27:23.888992 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" event={"ID":"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141","Type":"ContainerStarted","Data":"e6fc05290ec5cc22bab50abbf93fbfb1ea98179227628afb105ea95e8f18e8ac"} Jan 06 08:27:23 crc kubenswrapper[4784]: I0106 08:27:23.892155 4784 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" event={"ID":"d6b7564f-4ee7-4b23-93b1-8252ba326f2e","Type":"ContainerStarted","Data":"4b590af34a5e50cf23aed2cfed65dfb8fb562784efc6e3ca8fa182c0ef47cd96"} Jan 06 08:27:23 crc kubenswrapper[4784]: I0106 08:27:23.914562 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6486f686c6-9kf5n" podStartSLOduration=1.9145170230000002 podStartE2EDuration="1.914517023s" podCreationTimestamp="2026-01-06 08:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:27:23.909588479 +0000 UTC m=+745.955761326" watchObservedRunningTime="2026-01-06 08:27:23.914517023 +0000 UTC m=+745.960689850" Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.906257 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" event={"ID":"f0efdd39-6cba-42a7-9222-9084987431a7","Type":"ContainerStarted","Data":"a5ed3453a7e3766e7fd0442119cbaff983067c439ecaaf7143784bfddb908fd2"} Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.907831 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-rh8g6" event={"ID":"10985894-27f3-4621-ad1a-40ac399731e4","Type":"ContainerStarted","Data":"184fa719bac624dae7090b6aac811f114dcfb0c56981f13fdbaf88cd3969bd99"} Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.908082 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.910316 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" event={"ID":"e2e0c600-184b-4cc7-8b1f-ed0d37f7d141","Type":"ContainerStarted","Data":"eea8d684322f8efb6138e7bb146673f33de645f38ac24b034f3276a262ebaa9b"} Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.912950 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" event={"ID":"d6b7564f-4ee7-4b23-93b1-8252ba326f2e","Type":"ContainerStarted","Data":"f2745a906b9f4f495cd56660a071f9a92c893bc2ff923e021531496d7f06c276"} Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.913222 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.930793 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-rh8g6" podStartSLOduration=1.939464975 podStartE2EDuration="4.930772307s" podCreationTimestamp="2026-01-06 08:27:21 +0000 UTC" firstStartedPulling="2026-01-06 08:27:22.503039392 +0000 UTC m=+744.549212229" lastFinishedPulling="2026-01-06 08:27:25.494346684 +0000 UTC m=+747.540519561" observedRunningTime="2026-01-06 08:27:25.922842757 +0000 UTC m=+747.969015614" watchObservedRunningTime="2026-01-06 08:27:25.930772307 +0000 UTC m=+747.976945154" Jan 06 08:27:25 crc kubenswrapper[4784]: I0106 08:27:25.943536 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" podStartSLOduration=2.351474011 podStartE2EDuration="4.943504627s" podCreationTimestamp="2026-01-06 08:27:21 +0000 UTC" firstStartedPulling="2026-01-06 08:27:22.92848072 +0000 UTC m=+744.974653557" lastFinishedPulling="2026-01-06 08:27:25.520511296 
+0000 UTC m=+747.566684173" observedRunningTime="2026-01-06 08:27:25.936843987 +0000 UTC m=+747.983016834" watchObservedRunningTime="2026-01-06 08:27:25.943504627 +0000 UTC m=+747.989677464" Jan 06 08:27:27 crc kubenswrapper[4784]: I0106 08:27:27.928842 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" event={"ID":"f0efdd39-6cba-42a7-9222-9084987431a7","Type":"ContainerStarted","Data":"5950b0e2fefa0e57932c92ed93c6023ac9c97d47eac546b4cab6af8a0e2a4eb8"} Jan 06 08:27:27 crc kubenswrapper[4784]: I0106 08:27:27.958898 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-9zrkp" podStartSLOduration=3.390177641 podStartE2EDuration="5.958858982s" podCreationTimestamp="2026-01-06 08:27:22 +0000 UTC" firstStartedPulling="2026-01-06 08:27:22.923841635 +0000 UTC m=+744.970014472" lastFinishedPulling="2026-01-06 08:27:25.492522966 +0000 UTC m=+747.538695813" observedRunningTime="2026-01-06 08:27:25.952958384 +0000 UTC m=+747.999131251" watchObservedRunningTime="2026-01-06 08:27:27.958858982 +0000 UTC m=+750.005031829" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.420264 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-rh8g6" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.441935 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-tjmr9" podStartSLOduration=6.646194759 podStartE2EDuration="11.441904088s" podCreationTimestamp="2026-01-06 08:27:21 +0000 UTC" firstStartedPulling="2026-01-06 08:27:22.875825626 +0000 UTC m=+744.921998463" lastFinishedPulling="2026-01-06 08:27:27.671534945 +0000 UTC m=+749.717707792" observedRunningTime="2026-01-06 08:27:27.960002449 +0000 UTC m=+750.006175296" watchObservedRunningTime="2026-01-06 08:27:32.441904088 +0000 UTC m=+754.488076965" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.680269 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.680343 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.689973 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:32 crc kubenswrapper[4784]: I0106 08:27:32.971054 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6486f686c6-9kf5n" Jan 06 08:27:33 crc kubenswrapper[4784]: I0106 08:27:33.044886 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"] Jan 06 08:27:35 crc kubenswrapper[4784]: I0106 08:27:35.400851 4784 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 06 08:27:42 crc kubenswrapper[4784]: I0106 08:27:42.374440 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-f8fb84555-m9sxj" Jan 06 08:27:44 crc kubenswrapper[4784]: I0106 08:27:44.351586 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:27:44 crc kubenswrapper[4784]: I0106 08:27:44.351661 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.128216 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-q2d7x" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" containerName="console" containerID="cri-o://d1599b6f3347f59f8c03e2058e79726d968026fe2648b65de6526d3eac3ca88f" gracePeriod=15 Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.460560 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2"] Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.462787 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.465706 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.471228 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2"] Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.551237 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-q2d7x_ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0/console/0.log" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.551352 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.584859 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.585093 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv4lm\" (UniqueName: \"kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.585143 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.638662 4784 scope.go:117] "RemoveContainer" containerID="d1599b6f3347f59f8c03e2058e79726d968026fe2648b65de6526d3eac3ca88f" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686254 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686339 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686374 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686402 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7ns5\" (UniqueName: \"kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 
08:27:58.686448 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686481 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config\") pod \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\" (UID: \"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0\") " Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686775 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv4lm\" (UniqueName: \"kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686816 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.686915 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.687582 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.687879 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config" (OuterVolumeSpecName: "console-config") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.687948 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.687972 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.687980 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca" (OuterVolumeSpecName: "service-ca") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.688090 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.694029 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.694795 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.698032 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5" (OuterVolumeSpecName: "kube-api-access-f7ns5") pod "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" (UID: "ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0"). InnerVolumeSpecName "kube-api-access-f7ns5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.708773 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv4lm\" (UniqueName: \"kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.787214 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789110 4784 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789182 4784 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-service-ca\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789212 4784 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789237 4784 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789256 4784 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789274 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7ns5\" (UniqueName: \"kubernetes.io/projected/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-kube-api-access-f7ns5\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:58 crc kubenswrapper[4784]: I0106 08:27:58.789294 4784 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.050568 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2"] Jan 06 08:27:59 crc kubenswrapper[4784]: W0106 08:27:59.057419 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f835431_ce80_49f1_a8b5_6fc6319cfe13.slice/crio-30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95 WatchSource:0}: Error finding container 30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95: Status 404 returned error can't find the container with id 30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95 Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.166139 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" event={"ID":"2f835431-ce80-49f1-a8b5-6fc6319cfe13","Type":"ContainerStarted","Data":"30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95"} Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.166190 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-q2d7x" Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.166208 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-q2d7x" event={"ID":"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0","Type":"ContainerDied","Data":"d1599b6f3347f59f8c03e2058e79726d968026fe2648b65de6526d3eac3ca88f"} Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.166358 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-q2d7x" event={"ID":"ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0","Type":"ContainerDied","Data":"b738c5a6d0a1e5c4929f19c8fb7a840bff39e110470b7563c107382ed29654d1"} Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.223112 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"] Jan 06 08:27:59 crc kubenswrapper[4784]: I0106 08:27:59.232139 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-q2d7x"] Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.177804 4784 generic.go:334] "Generic (PLEG): container finished" podID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerID="79289ae428c13d81844d9824c9df6f26638bfa04e38764599c94b13f4be10fac" exitCode=0 Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.177934 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" event={"ID":"2f835431-ce80-49f1-a8b5-6fc6319cfe13","Type":"ContainerDied","Data":"79289ae428c13d81844d9824c9df6f26638bfa04e38764599c94b13f4be10fac"} Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.324112 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" path="/var/lib/kubelet/pods/ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0/volumes" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.797926 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:00 crc kubenswrapper[4784]: E0106 08:28:00.798311 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" containerName="console" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.798329 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" containerName="console" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.798460 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed32fa0e-4e6b-4cae-a4ec-5733e7beaac0" containerName="console" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.799506 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.816534 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.927624 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rlql\" (UniqueName: \"kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.927711 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:00 crc kubenswrapper[4784]: I0106 08:28:00.927736 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.030169 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rlql\" (UniqueName: \"kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.030239 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.030265 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.030938 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.031046 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.058773 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2rlql\" (UniqueName: \"kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql\") pod \"redhat-operators-lthnz\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.176943 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:01 crc kubenswrapper[4784]: I0106 08:28:01.441594 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:01 crc kubenswrapper[4784]: W0106 08:28:01.453151 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4d8342b_e69e_4e39_8dca_568d5b61b0a5.slice/crio-8cbc27a4b2499a5b4bd2e155aacebf600ff1ff097799d299b03cdac93d26f5c7 WatchSource:0}: Error finding container 8cbc27a4b2499a5b4bd2e155aacebf600ff1ff097799d299b03cdac93d26f5c7: Status 404 returned error can't find the container with id 8cbc27a4b2499a5b4bd2e155aacebf600ff1ff097799d299b03cdac93d26f5c7 Jan 06 08:28:02 crc kubenswrapper[4784]: I0106 08:28:02.192291 4784 generic.go:334] "Generic (PLEG): container finished" podID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerID="548f46fb971048fcf0086d9d2dfab639bae0c77a8de7c77b1f250b84f93039af" exitCode=0 Jan 06 08:28:02 crc kubenswrapper[4784]: I0106 08:28:02.192406 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" event={"ID":"2f835431-ce80-49f1-a8b5-6fc6319cfe13","Type":"ContainerDied","Data":"548f46fb971048fcf0086d9d2dfab639bae0c77a8de7c77b1f250b84f93039af"} Jan 06 08:28:02 crc kubenswrapper[4784]: I0106 08:28:02.195487 4784 generic.go:334] "Generic (PLEG): container finished" podID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerID="6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762" exitCode=0 Jan 06 08:28:02 crc kubenswrapper[4784]: I0106 08:28:02.195530 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerDied","Data":"6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762"} Jan 06 08:28:02 crc kubenswrapper[4784]: I0106 08:28:02.195587 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerStarted","Data":"8cbc27a4b2499a5b4bd2e155aacebf600ff1ff097799d299b03cdac93d26f5c7"} Jan 06 08:28:03 crc kubenswrapper[4784]: I0106 08:28:03.209247 4784 generic.go:334] "Generic (PLEG): container finished" podID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerID="ad5ee34ff87e7bbd6106ddbba248cb2edbfee402de677272f163460a6f3b0566" exitCode=0 Jan 06 08:28:03 crc kubenswrapper[4784]: I0106 08:28:03.209327 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" event={"ID":"2f835431-ce80-49f1-a8b5-6fc6319cfe13","Type":"ContainerDied","Data":"ad5ee34ff87e7bbd6106ddbba248cb2edbfee402de677272f163460a6f3b0566"} Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.217807 4784 generic.go:334] "Generic (PLEG): container finished" podID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerID="56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4" exitCode=0 Jan 06 08:28:04 
crc kubenswrapper[4784]: I0106 08:28:04.217858 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerDied","Data":"56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4"} Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.449537 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.586510 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util\") pod \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.586745 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mv4lm\" (UniqueName: \"kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm\") pod \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.586767 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle\") pod \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\" (UID: \"2f835431-ce80-49f1-a8b5-6fc6319cfe13\") " Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.591154 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle" (OuterVolumeSpecName: "bundle") pod "2f835431-ce80-49f1-a8b5-6fc6319cfe13" (UID: "2f835431-ce80-49f1-a8b5-6fc6319cfe13"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.594785 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm" (OuterVolumeSpecName: "kube-api-access-mv4lm") pod "2f835431-ce80-49f1-a8b5-6fc6319cfe13" (UID: "2f835431-ce80-49f1-a8b5-6fc6319cfe13"). InnerVolumeSpecName "kube-api-access-mv4lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.596519 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util" (OuterVolumeSpecName: "util") pod "2f835431-ce80-49f1-a8b5-6fc6319cfe13" (UID: "2f835431-ce80-49f1-a8b5-6fc6319cfe13"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.688732 4784 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-util\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.688799 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mv4lm\" (UniqueName: \"kubernetes.io/projected/2f835431-ce80-49f1-a8b5-6fc6319cfe13-kube-api-access-mv4lm\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:04 crc kubenswrapper[4784]: I0106 08:28:04.688818 4784 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f835431-ce80-49f1-a8b5-6fc6319cfe13-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:05 crc kubenswrapper[4784]: I0106 08:28:05.242115 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" event={"ID":"2f835431-ce80-49f1-a8b5-6fc6319cfe13","Type":"ContainerDied","Data":"30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95"} Jan 06 08:28:05 crc kubenswrapper[4784]: I0106 08:28:05.242203 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30bb779c01815c6be41afc54b1657b3ce80728c5f0310c70eab29d282d468d95" Jan 06 08:28:05 crc kubenswrapper[4784]: I0106 08:28:05.244018 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2" Jan 06 08:28:06 crc kubenswrapper[4784]: I0106 08:28:06.254435 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerStarted","Data":"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5"} Jan 06 08:28:06 crc kubenswrapper[4784]: I0106 08:28:06.288088 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lthnz" podStartSLOduration=3.424517716 podStartE2EDuration="6.288057273s" podCreationTimestamp="2026-01-06 08:28:00 +0000 UTC" firstStartedPulling="2026-01-06 08:28:02.197355967 +0000 UTC m=+784.243528804" lastFinishedPulling="2026-01-06 08:28:05.060895484 +0000 UTC m=+787.107068361" observedRunningTime="2026-01-06 08:28:06.2815824 +0000 UTC m=+788.327755267" watchObservedRunningTime="2026-01-06 08:28:06.288057273 +0000 UTC m=+788.334230140" Jan 06 08:28:11 crc kubenswrapper[4784]: I0106 08:28:11.177139 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:11 crc kubenswrapper[4784]: I0106 08:28:11.177577 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:12 crc kubenswrapper[4784]: I0106 08:28:12.270041 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lthnz" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="registry-server" probeResult="failure" output=< Jan 06 08:28:12 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 08:28:12 crc kubenswrapper[4784]: > Jan 06 08:28:14 crc kubenswrapper[4784]: I0106 08:28:14.351019 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon 
Jan 06 08:28:14 crc kubenswrapper[4784]: I0106 08:28:14.351620 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:28:14 crc kubenswrapper[4784]: I0106 08:28:14.351687 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 08:28:14 crc kubenswrapper[4784]: I0106 08:28:14.352382 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 08:28:14 crc kubenswrapper[4784]: I0106 08:28:14.352465 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d" gracePeriod=600
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.138037 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"]
Jan 06 08:28:15 crc kubenswrapper[4784]: E0106 08:28:15.138370 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="extract"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.138387 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="extract"
Jan 06 08:28:15 crc kubenswrapper[4784]: E0106 08:28:15.138408 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="pull"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.138414 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="pull"
Jan 06 08:28:15 crc kubenswrapper[4784]: E0106 08:28:15.138424 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="util"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.138431 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="util"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.138598 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f835431-ce80-49f1-a8b5-6fc6319cfe13" containerName="extract"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.139271 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.143638 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.143752 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.145024 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-d7gfp"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.145178 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.145357 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.159166 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv6z2\" (UniqueName: \"kubernetes.io/projected/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-kube-api-access-mv6z2\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.159241 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-webhook-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.159266 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-apiservice-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.170855 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"]
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.260797 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv6z2\" (UniqueName: \"kubernetes.io/projected/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-kube-api-access-mv6z2\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.260858 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-webhook-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.260885 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-apiservice-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.271492 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-apiservice-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.275199 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-webhook-cert\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.294222 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv6z2\" (UniqueName: \"kubernetes.io/projected/fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea-kube-api-access-mv6z2\") pod \"metallb-operator-controller-manager-664557b5f7-d75n6\" (UID: \"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea\") " pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.454801 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.493217 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf"]
Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.493947 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf"
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.496162 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.496505 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-g5mlj" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.499062 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.514684 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf"] Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.681303 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-apiservice-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.681375 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbc4m\" (UniqueName: \"kubernetes.io/projected/a53a2791-cbe2-49ba-838a-9c79c130186f-kube-api-access-bbc4m\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.681438 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-webhook-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.783510 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-apiservice-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.783679 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbc4m\" (UniqueName: \"kubernetes.io/projected/a53a2791-cbe2-49ba-838a-9c79c130186f-kube-api-access-bbc4m\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.783793 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-webhook-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 
08:28:15.792715 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-webhook-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.809260 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a53a2791-cbe2-49ba-838a-9c79c130186f-apiservice-cert\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.814929 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbc4m\" (UniqueName: \"kubernetes.io/projected/a53a2791-cbe2-49ba-838a-9c79c130186f-kube-api-access-bbc4m\") pod \"metallb-operator-webhook-server-5ff8cf5854-j76qf\" (UID: \"a53a2791-cbe2-49ba-838a-9c79c130186f\") " pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.829426 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:15 crc kubenswrapper[4784]: I0106 08:28:15.835019 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6"] Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.069722 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf"] Jan 06 08:28:16 crc kubenswrapper[4784]: W0106 08:28:16.076360 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda53a2791_cbe2_49ba_838a_9c79c130186f.slice/crio-3ec4003531ee7f4401e1b48a7659a5e9dc5260b272630a09a6e056b5c7bb25c5 WatchSource:0}: Error finding container 3ec4003531ee7f4401e1b48a7659a5e9dc5260b272630a09a6e056b5c7bb25c5: Status 404 returned error can't find the container with id 3ec4003531ee7f4401e1b48a7659a5e9dc5260b272630a09a6e056b5c7bb25c5 Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.318185 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6" event={"ID":"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea","Type":"ContainerStarted","Data":"7cefa30b295cd4fa7da3bc389ed33d9252f4b01f36fb5936b4b55d8202d1145b"} Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.318225 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" event={"ID":"a53a2791-cbe2-49ba-838a-9c79c130186f","Type":"ContainerStarted","Data":"3ec4003531ee7f4401e1b48a7659a5e9dc5260b272630a09a6e056b5c7bb25c5"} Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.319601 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d" exitCode=0 Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.319640 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d"} Jan 06 08:28:16 crc kubenswrapper[4784]: I0106 08:28:16.319671 4784 scope.go:117] "RemoveContainer" containerID="40c7444c53886fd498a697f6b12ba8e4e849b37b3d1846fc709df19e375e4a8c" Jan 06 08:28:17 crc kubenswrapper[4784]: I0106 08:28:17.330397 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064"} Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.224049 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.276068 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.361668 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6" event={"ID":"fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea","Type":"ContainerStarted","Data":"44ef99318957baa4b87b6051cb886eeb90bf8b9c2ca99deccee0540b25005665"} Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.361785 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6" Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.363893 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" event={"ID":"a53a2791-cbe2-49ba-838a-9c79c130186f","Type":"ContainerStarted","Data":"7fecd7595e0f2a078a10f832d0e880b38e8257932fe3e60093c62def80e3c3cd"} Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.434137 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" podStartSLOduration=1.7112211739999998 podStartE2EDuration="6.434114635s" podCreationTimestamp="2026-01-06 08:28:15 +0000 UTC" firstStartedPulling="2026-01-06 08:28:16.084906895 +0000 UTC m=+798.131079732" lastFinishedPulling="2026-01-06 08:28:20.807800356 +0000 UTC m=+802.853973193" observedRunningTime="2026-01-06 08:28:21.430514752 +0000 UTC m=+803.476687589" watchObservedRunningTime="2026-01-06 08:28:21.434114635 +0000 UTC m=+803.480287472" Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.436038 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6" podStartSLOduration=1.498867741 podStartE2EDuration="6.436028125s" podCreationTimestamp="2026-01-06 08:28:15 +0000 UTC" firstStartedPulling="2026-01-06 08:28:15.851300334 +0000 UTC m=+797.897473171" lastFinishedPulling="2026-01-06 08:28:20.788460678 +0000 UTC m=+802.834633555" observedRunningTime="2026-01-06 08:28:21.396951817 +0000 UTC m=+803.443124654" watchObservedRunningTime="2026-01-06 08:28:21.436028125 +0000 UTC m=+803.482200962" Jan 06 08:28:21 crc kubenswrapper[4784]: I0106 08:28:21.585441 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.371409 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.371677 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lthnz" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="registry-server" containerID="cri-o://32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5" gracePeriod=2 Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.769536 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.820033 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities\") pod \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.820204 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rlql\" (UniqueName: \"kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql\") pod \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.820251 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content\") pod \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\" (UID: \"a4d8342b-e69e-4e39-8dca-568d5b61b0a5\") " Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.822977 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities" (OuterVolumeSpecName: "utilities") pod "a4d8342b-e69e-4e39-8dca-568d5b61b0a5" (UID: "a4d8342b-e69e-4e39-8dca-568d5b61b0a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.843905 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql" (OuterVolumeSpecName: "kube-api-access-2rlql") pod "a4d8342b-e69e-4e39-8dca-568d5b61b0a5" (UID: "a4d8342b-e69e-4e39-8dca-568d5b61b0a5"). InnerVolumeSpecName "kube-api-access-2rlql". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.921477 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.921529 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rlql\" (UniqueName: \"kubernetes.io/projected/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-kube-api-access-2rlql\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:22 crc kubenswrapper[4784]: I0106 08:28:22.966256 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a4d8342b-e69e-4e39-8dca-568d5b61b0a5" (UID: "a4d8342b-e69e-4e39-8dca-568d5b61b0a5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.022854 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4d8342b-e69e-4e39-8dca-568d5b61b0a5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.379621 4784 generic.go:334] "Generic (PLEG): container finished" podID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerID="32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5" exitCode=0 Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.379697 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lthnz" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.379742 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerDied","Data":"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5"} Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.379773 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lthnz" event={"ID":"a4d8342b-e69e-4e39-8dca-568d5b61b0a5","Type":"ContainerDied","Data":"8cbc27a4b2499a5b4bd2e155aacebf600ff1ff097799d299b03cdac93d26f5c7"} Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.379791 4784 scope.go:117] "RemoveContainer" containerID="32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.404169 4784 scope.go:117] "RemoveContainer" containerID="56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.411492 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.414959 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lthnz"] Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.422335 4784 scope.go:117] "RemoveContainer" containerID="6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.444709 4784 scope.go:117] "RemoveContainer" containerID="32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5" Jan 06 08:28:23 crc kubenswrapper[4784]: E0106 08:28:23.445268 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5\": container with ID starting with 32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5 not found: ID does not exist" containerID="32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.445399 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5"} err="failed to get container status \"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5\": rpc error: code = NotFound desc = could not find container \"32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5\": container with ID starting with 32772ff3a1ffe9ed9a0a5980cf5bdfb47e5e4c5bedfcdbfc599d90972c80c5a5 not found: ID does not exist" Jan 06 08:28:23 crc 
kubenswrapper[4784]: I0106 08:28:23.445496 4784 scope.go:117] "RemoveContainer" containerID="56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4" Jan 06 08:28:23 crc kubenswrapper[4784]: E0106 08:28:23.446159 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4\": container with ID starting with 56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4 not found: ID does not exist" containerID="56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.446254 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4"} err="failed to get container status \"56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4\": rpc error: code = NotFound desc = could not find container \"56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4\": container with ID starting with 56e8e9d419dc8eebf4ba45a7f386b22081de56ec5e8d8d96a43ac4a1b8ad50b4 not found: ID does not exist" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.446318 4784 scope.go:117] "RemoveContainer" containerID="6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762" Jan 06 08:28:23 crc kubenswrapper[4784]: E0106 08:28:23.446694 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762\": container with ID starting with 6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762 not found: ID does not exist" containerID="6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762" Jan 06 08:28:23 crc kubenswrapper[4784]: I0106 08:28:23.446776 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762"} err="failed to get container status \"6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762\": rpc error: code = NotFound desc = could not find container \"6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762\": container with ID starting with 6b8cfd40f2fb9e18262e7c42340ebb5b4f6f6a2796a8b0f5b13487358a88b762 not found: ID does not exist" Jan 06 08:28:24 crc kubenswrapper[4784]: I0106 08:28:24.342801 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" path="/var/lib/kubelet/pods/a4d8342b-e69e-4e39-8dca-568d5b61b0a5/volumes" Jan 06 08:28:35 crc kubenswrapper[4784]: I0106 08:28:35.894983 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5ff8cf5854-j76qf" Jan 06 08:28:55 crc kubenswrapper[4784]: I0106 08:28:55.460394 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-664557b5f7-d75n6" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.403510 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-tcrn4"] Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.403927 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="extract-utilities" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.403948 4784 
Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.403964 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="extract-content"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.403974 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="extract-content"
Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.403985 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="registry-server"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.403995 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="registry-server"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.404187 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4d8342b-e69e-4e39-8dca-568d5b61b0a5" containerName="registry-server"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.407258 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.409953 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.413773 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-g8hns"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.413858 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.420530 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"]
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.421511 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.426461 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428228 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428263 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-startup\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428296 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-sockets\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428516 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428832 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.428911 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-conf\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.429047 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k2wg\" (UniqueName: \"kubernetes.io/projected/5cabc455-2ed6-4b62-aba3-d7cdff292e99-kube-api-access-7k2wg\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.429096 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-reloader\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.429129 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjqh6\" (UniqueName: \"kubernetes.io/projected/310dd289-ef3f-4c2b-87bf-eec891361a6e-kube-api-access-zjqh6\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.439725 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"]
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.521137 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-pg6rr"]
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.522508 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-pg6rr"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.524901 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.525052 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-gdgkm"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.525291 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.526396 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529771 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k2wg\" (UniqueName: \"kubernetes.io/projected/5cabc455-2ed6-4b62-aba3-d7cdff292e99-kube-api-access-7k2wg\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529820 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-reloader\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529864 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjqh6\" (UniqueName: \"kubernetes.io/projected/310dd289-ef3f-4c2b-87bf-eec891361a6e-kube-api-access-zjqh6\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529884 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529927 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-startup\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4"
Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529960 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-sockets\") pod
\"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.529989 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530013 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl64r\" (UniqueName: \"kubernetes.io/projected/3a8e306b-e5ee-4ced-b7be-f0a26248db92-kube-api-access-gl64r\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530041 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metallb-excludel2\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.530039 4784 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.530157 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert podName:5cabc455-2ed6-4b62-aba3-d7cdff292e99 nodeName:}" failed. No retries permitted until 2026-01-06 08:28:57.030132834 +0000 UTC m=+839.076305671 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert") pod "frr-k8s-webhook-server-7784b6fcf-sw629" (UID: "5cabc455-2ed6-4b62-aba3-d7cdff292e99") : secret "frr-k8s-webhook-server-cert" not found Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530182 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530313 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530368 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530401 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-conf\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.530466 4784 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.530494 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs podName:310dd289-ef3f-4c2b-87bf-eec891361a6e nodeName:}" failed. No retries permitted until 2026-01-06 08:28:57.030486715 +0000 UTC m=+839.076659552 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs") pod "frr-k8s-tcrn4" (UID: "310dd289-ef3f-4c2b-87bf-eec891361a6e") : secret "frr-k8s-certs-secret" not found Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530498 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-reloader\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530527 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530631 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-sockets\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.530877 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-conf\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.531325 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/310dd289-ef3f-4c2b-87bf-eec891361a6e-frr-startup\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.542950 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5bddd4b946-crpzt"] Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.544066 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.545442 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.558283 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-crpzt"] Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.559651 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjqh6\" (UniqueName: \"kubernetes.io/projected/310dd289-ef3f-4c2b-87bf-eec891361a6e-kube-api-access-zjqh6\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.562635 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k2wg\" (UniqueName: \"kubernetes.io/projected/5cabc455-2ed6-4b62-aba3-d7cdff292e99-kube-api-access-7k2wg\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631435 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl64r\" (UniqueName: \"kubernetes.io/projected/3a8e306b-e5ee-4ced-b7be-f0a26248db92-kube-api-access-gl64r\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631476 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metallb-excludel2\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631503 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skkt7\" (UniqueName: \"kubernetes.io/projected/7ec719ea-f249-4012-bd61-a87a31829e9c-kube-api-access-skkt7\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631523 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631578 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.631603 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-metrics-certs\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 
08:28:56.631622 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-cert\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.632149 4784 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.632251 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist podName:3a8e306b-e5ee-4ced-b7be-f0a26248db92 nodeName:}" failed. No retries permitted until 2026-01-06 08:28:57.132222471 +0000 UTC m=+839.178395528 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist") pod "speaker-pg6rr" (UID: "3a8e306b-e5ee-4ced-b7be-f0a26248db92") : secret "metallb-memberlist" not found Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.632638 4784 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.632669 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metallb-excludel2\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: E0106 08:28:56.632689 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs podName:3a8e306b-e5ee-4ced-b7be-f0a26248db92 nodeName:}" failed. No retries permitted until 2026-01-06 08:28:57.132679546 +0000 UTC m=+839.178852593 (durationBeforeRetry 500ms). 
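Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs") pod "speaker-pg6rr" (UID: "3a8e306b-e5ee-4ced-b7be-f0a26248db92") : secret "speaker-certs-secret" not found

The secret "..." not found failures above are ordering noise while the metallb operator is still creating its secrets: each failed MountVolume.SetUp is parked by nestedpendingoperations with a durationBeforeRetry that starts at 500ms and doubles on the next failure (the memberlist volume waits 500ms at 08:28:56, then 1s at 08:28:57), and the mount succeeds once the secret appears. A minimal sketch of that doubling-with-cap backoff pattern follows; it is illustrative only, with an arbitrary 2m cap assumed here, not the kubelet's exact code:

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff mirrors the durationBeforeRetry progression seen above:
    // the first failure waits 500ms, each further consecutive failure
    // doubles the wait, up to a cap chosen here for illustration.
    type backoff struct {
    	initial, max, next time.Duration
    }

    func (b *backoff) fail() time.Duration {
    	if b.next == 0 {
    		b.next = b.initial
    	} else {
    		b.next *= 2
    		if b.next > b.max {
    			b.next = b.max
    		}
    	}
    	return b.next
    }

    func main() {
    	b := &backoff{initial: 500 * time.Millisecond, max: 2 * time.Minute}
    	for i := 0; i < 4; i++ {
    		fmt.Println("no retries permitted for", b.fail()) // 500ms, 1s, 2s, 4s
    	}
    }
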
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs") pod "speaker-pg6rr" (UID: "3a8e306b-e5ee-4ced-b7be-f0a26248db92") : secret "speaker-certs-secret" not found Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.654382 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl64r\" (UniqueName: \"kubernetes.io/projected/3a8e306b-e5ee-4ced-b7be-f0a26248db92-kube-api-access-gl64r\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.733077 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-metrics-certs\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.733201 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-cert\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.734037 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skkt7\" (UniqueName: \"kubernetes.io/projected/7ec719ea-f249-4012-bd61-a87a31829e9c-kube-api-access-skkt7\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.738238 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-cert\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.738556 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ec719ea-f249-4012-bd61-a87a31829e9c-metrics-certs\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.755123 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skkt7\" (UniqueName: \"kubernetes.io/projected/7ec719ea-f249-4012-bd61-a87a31829e9c-kube-api-access-skkt7\") pod \"controller-5bddd4b946-crpzt\" (UID: \"7ec719ea-f249-4012-bd61-a87a31829e9c\") " pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:56 crc kubenswrapper[4784]: I0106 08:28:56.860691 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.042997 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.043142 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.055842 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/310dd289-ef3f-4c2b-87bf-eec891361a6e-metrics-certs\") pod \"frr-k8s-tcrn4\" (UID: \"310dd289-ef3f-4c2b-87bf-eec891361a6e\") " pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.056340 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5cabc455-2ed6-4b62-aba3-d7cdff292e99-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-sw629\" (UID: \"5cabc455-2ed6-4b62-aba3-d7cdff292e99\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.121945 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-crpzt"] Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.145192 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:57 crc kubenswrapper[4784]: E0106 08:28:57.145360 4784 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 06 08:28:57 crc kubenswrapper[4784]: E0106 08:28:57.145445 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist podName:3a8e306b-e5ee-4ced-b7be-f0a26248db92 nodeName:}" failed. No retries permitted until 2026-01-06 08:28:58.145422087 +0000 UTC m=+840.191594924 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist") pod "speaker-pg6rr" (UID: "3a8e306b-e5ee-4ced-b7be-f0a26248db92") : secret "metallb-memberlist" not found Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.146909 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.153489 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-metrics-certs\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.333484 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.346226 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.616405 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629"] Jan 06 08:28:57 crc kubenswrapper[4784]: W0106 08:28:57.626691 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5cabc455_2ed6_4b62_aba3_d7cdff292e99.slice/crio-da0c0c7d5a1763b7371683fb4fb2715b8d507219fa431fa6bc173424cc009ce2 WatchSource:0}: Error finding container da0c0c7d5a1763b7371683fb4fb2715b8d507219fa431fa6bc173424cc009ce2: Status 404 returned error can't find the container with id da0c0c7d5a1763b7371683fb4fb2715b8d507219fa431fa6bc173424cc009ce2 Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.650232 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-crpzt" event={"ID":"7ec719ea-f249-4012-bd61-a87a31829e9c","Type":"ContainerStarted","Data":"a7c24fc896191d77a4daeaec4ce267ab217c173133bd7b129b3b159b9e1422d2"} Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.650308 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-crpzt" event={"ID":"7ec719ea-f249-4012-bd61-a87a31829e9c","Type":"ContainerStarted","Data":"6f2a9654310326548cdcb17e627dddc452381b13036cff609fa233e685a3d5a7"} Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.650332 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-crpzt" event={"ID":"7ec719ea-f249-4012-bd61-a87a31829e9c","Type":"ContainerStarted","Data":"11f455b3e0e45e01bf99da6f5dfcdc300b57f8c508f3d45a0428588e9ca66c1f"} Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.650759 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.656258 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"bc1096728f51f62c82e112ddc6dd66764e9c9b2f2907dadf023e9079cf65f308"} Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 
08:28:57.658316 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" event={"ID":"5cabc455-2ed6-4b62-aba3-d7cdff292e99","Type":"ContainerStarted","Data":"da0c0c7d5a1763b7371683fb4fb2715b8d507219fa431fa6bc173424cc009ce2"} Jan 06 08:28:57 crc kubenswrapper[4784]: I0106 08:28:57.672093 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5bddd4b946-crpzt" podStartSLOduration=1.672065625 podStartE2EDuration="1.672065625s" podCreationTimestamp="2026-01-06 08:28:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:28:57.667191422 +0000 UTC m=+839.713364269" watchObservedRunningTime="2026-01-06 08:28:57.672065625 +0000 UTC m=+839.718238462" Jan 06 08:28:58 crc kubenswrapper[4784]: I0106 08:28:58.164754 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:58 crc kubenswrapper[4784]: I0106 08:28:58.172242 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a8e306b-e5ee-4ced-b7be-f0a26248db92-memberlist\") pod \"speaker-pg6rr\" (UID: \"3a8e306b-e5ee-4ced-b7be-f0a26248db92\") " pod="metallb-system/speaker-pg6rr" Jan 06 08:28:58 crc kubenswrapper[4784]: I0106 08:28:58.342593 4784 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-gdgkm" Jan 06 08:28:58 crc kubenswrapper[4784]: I0106 08:28:58.347017 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-pg6rr" Jan 06 08:28:58 crc kubenswrapper[4784]: I0106 08:28:58.667559 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg6rr" event={"ID":"3a8e306b-e5ee-4ced-b7be-f0a26248db92","Type":"ContainerStarted","Data":"8311adcb7294dc87fd945dc3300601e6a22a455674518bf9a100a091bbe9dcac"} Jan 06 08:28:59 crc kubenswrapper[4784]: I0106 08:28:59.682158 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg6rr" event={"ID":"3a8e306b-e5ee-4ced-b7be-f0a26248db92","Type":"ContainerStarted","Data":"e26c4555b5f1a374bae54d77d1acfea2a5eaa6f5fa8eeca20c7989ba2d835c96"} Jan 06 08:28:59 crc kubenswrapper[4784]: I0106 08:28:59.682701 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-pg6rr" Jan 06 08:28:59 crc kubenswrapper[4784]: I0106 08:28:59.682716 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg6rr" event={"ID":"3a8e306b-e5ee-4ced-b7be-f0a26248db92","Type":"ContainerStarted","Data":"970a69e877ef69118104d566df868e6f0663682f7dd003d067a3b88afca6bd07"} Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.737136 4784 generic.go:334] "Generic (PLEG): container finished" podID="310dd289-ef3f-4c2b-87bf-eec891361a6e" containerID="d6bd103f96c87aed8d99fb2f2eee7ca8bbad27824c665fbca4bd0382b5c107dc" exitCode=0 Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.737312 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerDied","Data":"d6bd103f96c87aed8d99fb2f2eee7ca8bbad27824c665fbca4bd0382b5c107dc"} Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.742614 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" event={"ID":"5cabc455-2ed6-4b62-aba3-d7cdff292e99","Type":"ContainerStarted","Data":"9b481c9969fd51aab61193e7ec34b1562c738cf8752bc97427537ce510673561"} Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.742856 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.777075 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-pg6rr" podStartSLOduration=10.777049598 podStartE2EDuration="10.777049598s" podCreationTimestamp="2026-01-06 08:28:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:28:59.712720346 +0000 UTC m=+841.758893183" watchObservedRunningTime="2026-01-06 08:29:06.777049598 +0000 UTC m=+848.823222455" Jan 06 08:29:06 crc kubenswrapper[4784]: I0106 08:29:06.802715 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" podStartSLOduration=2.952595895 podStartE2EDuration="10.802689604s" podCreationTimestamp="2026-01-06 08:28:56 +0000 UTC" firstStartedPulling="2026-01-06 08:28:57.629398494 +0000 UTC m=+839.675571331" lastFinishedPulling="2026-01-06 08:29:05.479492163 +0000 UTC m=+847.525665040" observedRunningTime="2026-01-06 08:29:06.797937794 +0000 UTC m=+848.844110631" watchObservedRunningTime="2026-01-06 08:29:06.802689604 +0000 UTC m=+848.848862451" Jan 06 08:29:07 crc kubenswrapper[4784]: I0106 08:29:07.751980 4784 generic.go:334] "Generic (PLEG): container finished" 
podID="310dd289-ef3f-4c2b-87bf-eec891361a6e" containerID="7bacbc428228a5b1e653e38d0bb096814796a27d9c1487e0bb974635fbf300c0" exitCode=0 Jan 06 08:29:07 crc kubenswrapper[4784]: I0106 08:29:07.752066 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerDied","Data":"7bacbc428228a5b1e653e38d0bb096814796a27d9c1487e0bb974635fbf300c0"} Jan 06 08:29:08 crc kubenswrapper[4784]: I0106 08:29:08.352956 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-pg6rr" Jan 06 08:29:08 crc kubenswrapper[4784]: I0106 08:29:08.767491 4784 generic.go:334] "Generic (PLEG): container finished" podID="310dd289-ef3f-4c2b-87bf-eec891361a6e" containerID="3c7a8129418d5b0bf5b186fecefdb3acb474f3d450dc3a7907c091a7a60b49c7" exitCode=0 Jan 06 08:29:08 crc kubenswrapper[4784]: I0106 08:29:08.767589 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerDied","Data":"3c7a8129418d5b0bf5b186fecefdb3acb474f3d450dc3a7907c091a7a60b49c7"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.784500 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"e1c4fab35eb03ce183fa7d7efb607d21e658c1c704fab0afbff140c82e912d3a"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.785121 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"3377cfe53ccc8301aa29f9ac1fb1bd5dd5c774ea38217da87d65de4c3d820871"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.785150 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"5d266afd31dfe96963cdaf2f6d443a937350ae20e3fc13010e393c45aec67d26"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.785161 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"2dbc604e2b02f1d11322edee6897e878781258fbffab9cd7c50e445e8f520d40"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.785170 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"acd28634d86a2deefbfe6dea93f8795997ca46cf77dc1d04129eb83d94671f66"} Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.940432 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl"] Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.941864 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.945341 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.955361 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl"] Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.982197 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79cbf\" (UniqueName: \"kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.982714 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:09 crc kubenswrapper[4784]: I0106 08:29:09.982850 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.085292 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79cbf\" (UniqueName: \"kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.085625 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.085694 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.086349 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.086641 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.109929 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79cbf\" (UniqueName: \"kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.262323 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.627335 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl"] Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.798311 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tcrn4" event={"ID":"310dd289-ef3f-4c2b-87bf-eec891361a6e","Type":"ContainerStarted","Data":"8b8e89fbf2c2963f3e5362b017841657889431b6e52c79647d75ccbf690cde8e"} Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.798523 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.800534 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerStarted","Data":"b8d7e9f66ab262fd818fc786b1646f65fe4a699a04730d0a02997b9f7594affb"} Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.800646 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerStarted","Data":"946f810a27c4a92a50a4f3e7ccd29926f8513503272b28ce77d4ee82ec270890"} Jan 06 08:29:10 crc kubenswrapper[4784]: I0106 08:29:10.832769 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-tcrn4" podStartSLOduration=6.84183621 podStartE2EDuration="14.832744683s" podCreationTimestamp="2026-01-06 08:28:56 +0000 UTC" firstStartedPulling="2026-01-06 08:28:57.488618481 +0000 UTC m=+839.534791318" lastFinishedPulling="2026-01-06 08:29:05.479526944 +0000 UTC m=+847.525699791" observedRunningTime="2026-01-06 08:29:10.827773956 +0000 UTC m=+852.873946793" watchObservedRunningTime="2026-01-06 08:29:10.832744683 +0000 UTC m=+852.878917520" Jan 06 08:29:11 crc kubenswrapper[4784]: I0106 08:29:11.812468 4784 generic.go:334] "Generic 
(PLEG): container finished" podID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerID="b8d7e9f66ab262fd818fc786b1646f65fe4a699a04730d0a02997b9f7594affb" exitCode=0 Jan 06 08:29:11 crc kubenswrapper[4784]: I0106 08:29:11.812597 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerDied","Data":"b8d7e9f66ab262fd818fc786b1646f65fe4a699a04730d0a02997b9f7594affb"} Jan 06 08:29:12 crc kubenswrapper[4784]: I0106 08:29:12.335296 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:29:12 crc kubenswrapper[4784]: I0106 08:29:12.388105 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:29:15 crc kubenswrapper[4784]: I0106 08:29:15.850763 4784 generic.go:334] "Generic (PLEG): container finished" podID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerID="0246f071020b11ed00970067f71e130e48fef72633073478a8732c6ba2a9f762" exitCode=0 Jan 06 08:29:15 crc kubenswrapper[4784]: I0106 08:29:15.850873 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerDied","Data":"0246f071020b11ed00970067f71e130e48fef72633073478a8732c6ba2a9f762"} Jan 06 08:29:16 crc kubenswrapper[4784]: I0106 08:29:16.862850 4784 generic.go:334] "Generic (PLEG): container finished" podID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerID="d08fb2ceb985324d488167c901b27b5ae6682245f222a1803ca3e4e610b88629" exitCode=0 Jan 06 08:29:16 crc kubenswrapper[4784]: I0106 08:29:16.862917 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerDied","Data":"d08fb2ceb985324d488167c901b27b5ae6682245f222a1803ca3e4e610b88629"} Jan 06 08:29:16 crc kubenswrapper[4784]: I0106 08:29:16.865126 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5bddd4b946-crpzt" Jan 06 08:29:17 crc kubenswrapper[4784]: I0106 08:29:17.353271 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-sw629" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.145325 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.241378 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util\") pod \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.241433 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle\") pod \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.241563 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79cbf\" (UniqueName: \"kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf\") pod \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\" (UID: \"6acbaac6-e54d-44c6-a0a3-cafa748daa9a\") " Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.245171 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle" (OuterVolumeSpecName: "bundle") pod "6acbaac6-e54d-44c6-a0a3-cafa748daa9a" (UID: "6acbaac6-e54d-44c6-a0a3-cafa748daa9a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.252227 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util" (OuterVolumeSpecName: "util") pod "6acbaac6-e54d-44c6-a0a3-cafa748daa9a" (UID: "6acbaac6-e54d-44c6-a0a3-cafa748daa9a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.264812 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf" (OuterVolumeSpecName: "kube-api-access-79cbf") pod "6acbaac6-e54d-44c6-a0a3-cafa748daa9a" (UID: "6acbaac6-e54d-44c6-a0a3-cafa748daa9a"). InnerVolumeSpecName "kube-api-access-79cbf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.343607 4784 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-util\") on node \"crc\" DevicePath \"\"" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.343650 4784 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.343662 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79cbf\" (UniqueName: \"kubernetes.io/projected/6acbaac6-e54d-44c6-a0a3-cafa748daa9a-kube-api-access-79cbf\") on node \"crc\" DevicePath \"\"" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.883675 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" event={"ID":"6acbaac6-e54d-44c6-a0a3-cafa748daa9a","Type":"ContainerDied","Data":"946f810a27c4a92a50a4f3e7ccd29926f8513503272b28ce77d4ee82ec270890"} Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.884052 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="946f810a27c4a92a50a4f3e7ccd29926f8513503272b28ce77d4ee82ec270890" Jan 06 08:29:18 crc kubenswrapper[4784]: I0106 08:29:18.883919 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.049350 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl"] Jan 06 08:29:23 crc kubenswrapper[4784]: E0106 08:29:23.049888 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="util" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.049901 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="util" Jan 06 08:29:23 crc kubenswrapper[4784]: E0106 08:29:23.049911 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="extract" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.049917 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="extract" Jan 06 08:29:23 crc kubenswrapper[4784]: E0106 08:29:23.049925 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="pull" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.049931 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="pull" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.050029 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="6acbaac6-e54d-44c6-a0a3-cafa748daa9a" containerName="extract" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.050391 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.054694 4784 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-8kxp6" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.055043 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.057231 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.077107 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl"] Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.114705 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f2c2f736-8add-420b-bc7f-4f14c2257747-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.114825 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwg6q\" (UniqueName: \"kubernetes.io/projected/f2c2f736-8add-420b-bc7f-4f14c2257747-kube-api-access-fwg6q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.216140 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwg6q\" (UniqueName: \"kubernetes.io/projected/f2c2f736-8add-420b-bc7f-4f14c2257747-kube-api-access-fwg6q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.216325 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f2c2f736-8add-420b-bc7f-4f14c2257747-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.217093 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f2c2f736-8add-420b-bc7f-4f14c2257747-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.238871 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwg6q\" (UniqueName: \"kubernetes.io/projected/f2c2f736-8add-420b-bc7f-4f14c2257747-kube-api-access-fwg6q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qdsvl\" (UID: \"f2c2f736-8add-420b-bc7f-4f14c2257747\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.369072 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.669325 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl"] Jan 06 08:29:23 crc kubenswrapper[4784]: W0106 08:29:23.680602 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2c2f736_8add_420b_bc7f_4f14c2257747.slice/crio-54c5af286b0133aeb1b2260b5537e4b4843016381fb9311b87303472707541a4 WatchSource:0}: Error finding container 54c5af286b0133aeb1b2260b5537e4b4843016381fb9311b87303472707541a4: Status 404 returned error can't find the container with id 54c5af286b0133aeb1b2260b5537e4b4843016381fb9311b87303472707541a4 Jan 06 08:29:23 crc kubenswrapper[4784]: I0106 08:29:23.920081 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" event={"ID":"f2c2f736-8add-420b-bc7f-4f14c2257747","Type":"ContainerStarted","Data":"54c5af286b0133aeb1b2260b5537e4b4843016381fb9311b87303472707541a4"} Jan 06 08:29:27 crc kubenswrapper[4784]: I0106 08:29:27.336921 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-tcrn4" Jan 06 08:29:33 crc kubenswrapper[4784]: I0106 08:29:33.026954 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" event={"ID":"f2c2f736-8add-420b-bc7f-4f14c2257747","Type":"ContainerStarted","Data":"167de082a6088f759ae4ad55216637cc2aa7d3231815b34fa422344f78b417c5"} Jan 06 08:29:33 crc kubenswrapper[4784]: I0106 08:29:33.051903 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qdsvl" podStartSLOduration=1.347542232 podStartE2EDuration="10.051883898s" podCreationTimestamp="2026-01-06 08:29:23 +0000 UTC" firstStartedPulling="2026-01-06 08:29:23.687256841 +0000 UTC m=+865.733429678" lastFinishedPulling="2026-01-06 08:29:32.391598517 +0000 UTC m=+874.437771344" observedRunningTime="2026-01-06 08:29:33.048352905 +0000 UTC m=+875.094525762" watchObservedRunningTime="2026-01-06 08:29:33.051883898 +0000 UTC m=+875.098056745" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.803270 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-bzzrv"] Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.804869 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.812364 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.812424 4784 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-qnth6" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.812724 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.815252 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-bzzrv"] Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.957729 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csx4k\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-kube-api-access-csx4k\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:36 crc kubenswrapper[4784]: I0106 08:29:36.958019 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.059111 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csx4k\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-kube-api-access-csx4k\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.059169 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.081269 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.081401 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csx4k\" (UniqueName: \"kubernetes.io/projected/958c414a-2141-4223-bf2d-f4ba1a83194e-kube-api-access-csx4k\") pod \"cert-manager-webhook-f4fb5df64-bzzrv\" (UID: \"958c414a-2141-4223-bf2d-f4ba1a83194e\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.126102 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.614436 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-bzzrv"] Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.939162 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-57bnw"] Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.940946 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.947033 4784 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-h7mgx" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.948771 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-57bnw"] Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.978006 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:37 crc kubenswrapper[4784]: I0106 08:29:37.978128 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjgrb\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-kube-api-access-jjgrb\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.059850 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" event={"ID":"958c414a-2141-4223-bf2d-f4ba1a83194e","Type":"ContainerStarted","Data":"7538389653fcfceb0fcf77973ae04491ab7aa5f03c268ba19a69bf7e7c66b211"} Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.080328 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.080449 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjgrb\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-kube-api-access-jjgrb\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.104263 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.107666 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjgrb\" (UniqueName: \"kubernetes.io/projected/bd7d1a44-da72-4cb3-bbb9-b71e33c366dc-kube-api-access-jjgrb\") pod \"cert-manager-cainjector-855d9ccff4-57bnw\" (UID: \"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.263657 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" Jan 06 08:29:38 crc kubenswrapper[4784]: I0106 08:29:38.537691 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-57bnw"] Jan 06 08:29:38 crc kubenswrapper[4784]: W0106 08:29:38.546889 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd7d1a44_da72_4cb3_bbb9_b71e33c366dc.slice/crio-f24b03118bd9e717313e98de852adf8b0d465487e781fe11c8555650236d460f WatchSource:0}: Error finding container f24b03118bd9e717313e98de852adf8b0d465487e781fe11c8555650236d460f: Status 404 returned error can't find the container with id f24b03118bd9e717313e98de852adf8b0d465487e781fe11c8555650236d460f Jan 06 08:29:39 crc kubenswrapper[4784]: I0106 08:29:39.072375 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" event={"ID":"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc","Type":"ContainerStarted","Data":"f24b03118bd9e717313e98de852adf8b0d465487e781fe11c8555650236d460f"} Jan 06 08:29:46 crc kubenswrapper[4784]: I0106 08:29:46.125461 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" event={"ID":"958c414a-2141-4223-bf2d-f4ba1a83194e","Type":"ContainerStarted","Data":"0105a1932c1628395d5e1618aee2f100deb0766f601f41c4001c4adc06116a4e"} Jan 06 08:29:46 crc kubenswrapper[4784]: I0106 08:29:46.126471 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:46 crc kubenswrapper[4784]: I0106 08:29:46.127107 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" event={"ID":"bd7d1a44-da72-4cb3-bbb9-b71e33c366dc","Type":"ContainerStarted","Data":"62f721377577b2e4918ca67ba3748e6bb200b8b868ba15bdeb10293f40c6182f"} Jan 06 08:29:46 crc kubenswrapper[4784]: I0106 08:29:46.147517 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" podStartSLOduration=1.930492028 podStartE2EDuration="10.147486845s" podCreationTimestamp="2026-01-06 08:29:36 +0000 UTC" firstStartedPulling="2026-01-06 08:29:37.627844553 +0000 UTC m=+879.674017400" lastFinishedPulling="2026-01-06 08:29:45.84483938 +0000 UTC m=+887.891012217" observedRunningTime="2026-01-06 08:29:46.144653251 +0000 UTC m=+888.190826098" watchObservedRunningTime="2026-01-06 08:29:46.147486845 +0000 UTC m=+888.193659722" Jan 06 08:29:46 crc kubenswrapper[4784]: I0106 08:29:46.162936 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-57bnw" podStartSLOduration=1.835284256 podStartE2EDuration="9.162914569s" podCreationTimestamp="2026-01-06 08:29:37 +0000 UTC" firstStartedPulling="2026-01-06 08:29:38.549463709 +0000 UTC m=+880.595636556" lastFinishedPulling="2026-01-06 08:29:45.877094032 +0000 UTC m=+887.923266869" 
observedRunningTime="2026-01-06 08:29:46.160275862 +0000 UTC m=+888.206448699" watchObservedRunningTime="2026-01-06 08:29:46.162914569 +0000 UTC m=+888.209087416" Jan 06 08:29:52 crc kubenswrapper[4784]: I0106 08:29:52.134961 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-bzzrv" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.216306 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rl2h7"] Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.217439 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.225634 4784 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-n7tzg" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.226209 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bnts\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-kube-api-access-2bnts\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.226378 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-bound-sa-token\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.234613 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rl2h7"] Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.328229 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bnts\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-kube-api-access-2bnts\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.328628 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-bound-sa-token\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.351883 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bnts\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-kube-api-access-2bnts\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.352168 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6e05974-22af-4307-83d0-4707be8f8694-bound-sa-token\") pod \"cert-manager-86cb77c54b-rl2h7\" (UID: \"c6e05974-22af-4307-83d0-4707be8f8694\") " pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.544891 4784 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rl2h7" Jan 06 08:29:53 crc kubenswrapper[4784]: I0106 08:29:53.849794 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rl2h7"] Jan 06 08:29:54 crc kubenswrapper[4784]: I0106 08:29:54.188220 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rl2h7" event={"ID":"c6e05974-22af-4307-83d0-4707be8f8694","Type":"ContainerStarted","Data":"0b23f7dc5b035684b8d03d50dc31d2a9d215aa1cf82a2e8507def839e926dc04"} Jan 06 08:29:54 crc kubenswrapper[4784]: I0106 08:29:54.188289 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rl2h7" event={"ID":"c6e05974-22af-4307-83d0-4707be8f8694","Type":"ContainerStarted","Data":"96ff4292fcd9eaa49a7243b4d773b3dafa829ff8051c8f6e511bc112632680ff"} Jan 06 08:29:54 crc kubenswrapper[4784]: I0106 08:29:54.213048 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-rl2h7" podStartSLOduration=1.213026542 podStartE2EDuration="1.213026542s" podCreationTimestamp="2026-01-06 08:29:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:29:54.210207469 +0000 UTC m=+896.256380336" watchObservedRunningTime="2026-01-06 08:29:54.213026542 +0000 UTC m=+896.259199379" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.179223 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6"] Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.181393 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.184990 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.185041 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.199000 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6"] Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.240155 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.240271 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4pb5\" (UniqueName: \"kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.240406 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.342586 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4pb5\" (UniqueName: \"kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.342681 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.342722 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.343946 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume\") pod 
\"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.358572 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.373439 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4pb5\" (UniqueName: \"kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5\") pod \"collect-profiles-29461470-h2zw6\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:00 crc kubenswrapper[4784]: I0106 08:30:00.515241 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:01 crc kubenswrapper[4784]: I0106 08:30:01.560995 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6"] Jan 06 08:30:01 crc kubenswrapper[4784]: W0106 08:30:01.568010 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbbcb33d_d688_44bb_bc55_791e7269c3fa.slice/crio-87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558 WatchSource:0}: Error finding container 87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558: Status 404 returned error can't find the container with id 87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558 Jan 06 08:30:02 crc kubenswrapper[4784]: I0106 08:30:02.256602 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" event={"ID":"fbbcb33d-d688-44bb-bc55-791e7269c3fa","Type":"ContainerStarted","Data":"87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558"} Jan 06 08:30:05 crc kubenswrapper[4784]: I0106 08:30:05.280966 4784 generic.go:334] "Generic (PLEG): container finished" podID="fbbcb33d-d688-44bb-bc55-791e7269c3fa" containerID="1f5b712a8108b418cfdd98f97b2cfffee71948b615688dec98be80b41b3b508d" exitCode=0 Jan 06 08:30:05 crc kubenswrapper[4784]: I0106 08:30:05.281063 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" event={"ID":"fbbcb33d-d688-44bb-bc55-791e7269c3fa","Type":"ContainerDied","Data":"1f5b712a8108b418cfdd98f97b2cfffee71948b615688dec98be80b41b3b508d"} Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.124047 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.125862 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.129261 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.129297 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-xh9vt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.130480 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.182630 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.232447 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m7vg\" (UniqueName: \"kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg\") pod \"openstack-operator-index-zdktt\" (UID: \"05783b82-6aa2-457e-b49c-7a4de587f890\") " pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.333639 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m7vg\" (UniqueName: \"kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg\") pod \"openstack-operator-index-zdktt\" (UID: \"05783b82-6aa2-457e-b49c-7a4de587f890\") " pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.356709 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m7vg\" (UniqueName: \"kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg\") pod \"openstack-operator-index-zdktt\" (UID: \"05783b82-6aa2-457e-b49c-7a4de587f890\") " pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.499285 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.597006 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.730736 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.740537 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume\") pod \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.740696 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume\") pod \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.740802 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4pb5\" (UniqueName: \"kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5\") pod \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\" (UID: \"fbbcb33d-d688-44bb-bc55-791e7269c3fa\") " Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.742296 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume" (OuterVolumeSpecName: "config-volume") pod "fbbcb33d-d688-44bb-bc55-791e7269c3fa" (UID: "fbbcb33d-d688-44bb-bc55-791e7269c3fa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.748911 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fbbcb33d-d688-44bb-bc55-791e7269c3fa" (UID: "fbbcb33d-d688-44bb-bc55-791e7269c3fa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.749162 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5" (OuterVolumeSpecName: "kube-api-access-c4pb5") pod "fbbcb33d-d688-44bb-bc55-791e7269c3fa" (UID: "fbbcb33d-d688-44bb-bc55-791e7269c3fa"). InnerVolumeSpecName "kube-api-access-c4pb5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.843009 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4pb5\" (UniqueName: \"kubernetes.io/projected/fbbcb33d-d688-44bb-bc55-791e7269c3fa-kube-api-access-c4pb5\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.843057 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fbbcb33d-d688-44bb-bc55-791e7269c3fa-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:06 crc kubenswrapper[4784]: I0106 08:30:06.843069 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fbbcb33d-d688-44bb-bc55-791e7269c3fa-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:07 crc kubenswrapper[4784]: I0106 08:30:07.300051 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zdktt" event={"ID":"05783b82-6aa2-457e-b49c-7a4de587f890","Type":"ContainerStarted","Data":"7121ef6e0d8b5acfbe77c48ce0f5c86b5c1d7eb14393084bf4abcff650eba94b"} Jan 06 08:30:07 crc kubenswrapper[4784]: I0106 08:30:07.301910 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" event={"ID":"fbbcb33d-d688-44bb-bc55-791e7269c3fa","Type":"ContainerDied","Data":"87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558"} Jan 06 08:30:07 crc kubenswrapper[4784]: I0106 08:30:07.301980 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87aedc3e666667fc8c28c99963be5f5c68bcd2dbc2afb16115441e465cc48558" Jan 06 08:30:07 crc kubenswrapper[4784]: I0106 08:30:07.302259 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6" Jan 06 08:30:09 crc kubenswrapper[4784]: I0106 08:30:09.319021 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zdktt" event={"ID":"05783b82-6aa2-457e-b49c-7a4de587f890","Type":"ContainerStarted","Data":"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159"} Jan 06 08:30:09 crc kubenswrapper[4784]: I0106 08:30:09.342408 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-zdktt" podStartSLOduration=1.5047384670000001 podStartE2EDuration="3.342387174s" podCreationTimestamp="2026-01-06 08:30:06 +0000 UTC" firstStartedPulling="2026-01-06 08:30:06.72368423 +0000 UTC m=+908.769857087" lastFinishedPulling="2026-01-06 08:30:08.561332957 +0000 UTC m=+910.607505794" observedRunningTime="2026-01-06 08:30:09.339287442 +0000 UTC m=+911.385460349" watchObservedRunningTime="2026-01-06 08:30:09.342387174 +0000 UTC m=+911.388560021" Jan 06 08:30:09 crc kubenswrapper[4784]: I0106 08:30:09.489922 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.092253 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-r55lw"] Jan 06 08:30:10 crc kubenswrapper[4784]: E0106 08:30:10.092509 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbbcb33d-d688-44bb-bc55-791e7269c3fa" containerName="collect-profiles" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.092524 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbbcb33d-d688-44bb-bc55-791e7269c3fa" containerName="collect-profiles" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.093674 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbbcb33d-d688-44bb-bc55-791e7269c3fa" containerName="collect-profiles" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.094327 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.112294 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r55lw"] Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.192501 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs88n\" (UniqueName: \"kubernetes.io/projected/5f00cd83-9e7b-4df9-a67e-3b33461aa39b-kube-api-access-gs88n\") pod \"openstack-operator-index-r55lw\" (UID: \"5f00cd83-9e7b-4df9-a67e-3b33461aa39b\") " pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.293537 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs88n\" (UniqueName: \"kubernetes.io/projected/5f00cd83-9e7b-4df9-a67e-3b33461aa39b-kube-api-access-gs88n\") pod \"openstack-operator-index-r55lw\" (UID: \"5f00cd83-9e7b-4df9-a67e-3b33461aa39b\") " pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.319685 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs88n\" (UniqueName: \"kubernetes.io/projected/5f00cd83-9e7b-4df9-a67e-3b33461aa39b-kube-api-access-gs88n\") pod \"openstack-operator-index-r55lw\" (UID: \"5f00cd83-9e7b-4df9-a67e-3b33461aa39b\") " pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.455428 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:10 crc kubenswrapper[4784]: I0106 08:30:10.755214 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r55lw"] Jan 06 08:30:11 crc kubenswrapper[4784]: I0106 08:30:11.338568 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r55lw" event={"ID":"5f00cd83-9e7b-4df9-a67e-3b33461aa39b","Type":"ContainerStarted","Data":"a67c5b83a84cea46c17cefc1928c9605033761e621f6fd39d2a8a83b7e313672"} Jan 06 08:30:11 crc kubenswrapper[4784]: I0106 08:30:11.339098 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-zdktt" podUID="05783b82-6aa2-457e-b49c-7a4de587f890" containerName="registry-server" containerID="cri-o://29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159" gracePeriod=2 Jan 06 08:30:11 crc kubenswrapper[4784]: I0106 08:30:11.747938 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:11 crc kubenswrapper[4784]: I0106 08:30:11.918802 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m7vg\" (UniqueName: \"kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg\") pod \"05783b82-6aa2-457e-b49c-7a4de587f890\" (UID: \"05783b82-6aa2-457e-b49c-7a4de587f890\") " Jan 06 08:30:11 crc kubenswrapper[4784]: I0106 08:30:11.929799 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg" (OuterVolumeSpecName: "kube-api-access-9m7vg") pod "05783b82-6aa2-457e-b49c-7a4de587f890" (UID: "05783b82-6aa2-457e-b49c-7a4de587f890"). 
InnerVolumeSpecName "kube-api-access-9m7vg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.020972 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m7vg\" (UniqueName: \"kubernetes.io/projected/05783b82-6aa2-457e-b49c-7a4de587f890-kube-api-access-9m7vg\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.352205 4784 generic.go:334] "Generic (PLEG): container finished" podID="05783b82-6aa2-457e-b49c-7a4de587f890" containerID="29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159" exitCode=0 Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.352263 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zdktt" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.352301 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zdktt" event={"ID":"05783b82-6aa2-457e-b49c-7a4de587f890","Type":"ContainerDied","Data":"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159"} Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.352387 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zdktt" event={"ID":"05783b82-6aa2-457e-b49c-7a4de587f890","Type":"ContainerDied","Data":"7121ef6e0d8b5acfbe77c48ce0f5c86b5c1d7eb14393084bf4abcff650eba94b"} Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.352410 4784 scope.go:117] "RemoveContainer" containerID="29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.354336 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r55lw" event={"ID":"5f00cd83-9e7b-4df9-a67e-3b33461aa39b","Type":"ContainerStarted","Data":"bd385106aabffc03f2d6e3c663e155e990b18f7b8b78dade864b795afb1d676d"} Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.386848 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-r55lw" podStartSLOduration=1.8730497609999999 podStartE2EDuration="2.386827117s" podCreationTimestamp="2026-01-06 08:30:10 +0000 UTC" firstStartedPulling="2026-01-06 08:30:10.758592632 +0000 UTC m=+912.804765469" lastFinishedPulling="2026-01-06 08:30:11.272369958 +0000 UTC m=+913.318542825" observedRunningTime="2026-01-06 08:30:12.380616124 +0000 UTC m=+914.426788961" watchObservedRunningTime="2026-01-06 08:30:12.386827117 +0000 UTC m=+914.432999944" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.388980 4784 scope.go:117] "RemoveContainer" containerID="29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159" Jan 06 08:30:12 crc kubenswrapper[4784]: E0106 08:30:12.389734 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159\": container with ID starting with 29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159 not found: ID does not exist" containerID="29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.389776 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159"} err="failed to get container status 
\"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159\": rpc error: code = NotFound desc = could not find container \"29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159\": container with ID starting with 29f105bd9111fdf20104016700aebe3a29ed858a58a09d53f6324261bad2e159 not found: ID does not exist" Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.408519 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:12 crc kubenswrapper[4784]: I0106 08:30:12.420239 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-zdktt"] Jan 06 08:30:14 crc kubenswrapper[4784]: I0106 08:30:14.326536 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05783b82-6aa2-457e-b49c-7a4de587f890" path="/var/lib/kubelet/pods/05783b82-6aa2-457e-b49c-7a4de587f890/volumes" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.305703 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:20 crc kubenswrapper[4784]: E0106 08:30:20.307922 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05783b82-6aa2-457e-b49c-7a4de587f890" containerName="registry-server" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.308125 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="05783b82-6aa2-457e-b49c-7a4de587f890" containerName="registry-server" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.308480 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="05783b82-6aa2-457e-b49c-7a4de587f890" containerName="registry-server" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.310261 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.331240 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.453755 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mggws\" (UniqueName: \"kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.453847 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.454116 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.455618 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.455677 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.488317 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.556271 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.556443 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mggws\" (UniqueName: \"kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.556489 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.556877 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities\") pod \"community-operators-2f2z6\" (UID: 
\"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.557218 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.601049 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mggws\" (UniqueName: \"kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws\") pod \"community-operators-2f2z6\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:20 crc kubenswrapper[4784]: I0106 08:30:20.639988 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:21 crc kubenswrapper[4784]: I0106 08:30:21.085066 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:21 crc kubenswrapper[4784]: I0106 08:30:21.426369 4784 generic.go:334] "Generic (PLEG): container finished" podID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerID="7bfb4e04e5636761f9384f225058f3733a99395c19b81d45d462db0e0da55072" exitCode=0 Jan 06 08:30:21 crc kubenswrapper[4784]: I0106 08:30:21.426761 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerDied","Data":"7bfb4e04e5636761f9384f225058f3733a99395c19b81d45d462db0e0da55072"} Jan 06 08:30:21 crc kubenswrapper[4784]: I0106 08:30:21.427087 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerStarted","Data":"c47d61cf7085e18c0a8b3e17eba87e9a5a93296464e1e9f1a620d76afd690727"} Jan 06 08:30:21 crc kubenswrapper[4784]: I0106 08:30:21.491901 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-r55lw" Jan 06 08:30:22 crc kubenswrapper[4784]: I0106 08:30:22.433843 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerStarted","Data":"af6c59ff88de5778be22030ba89d277867d5087fe8ed09edfc7a18e5aa079548"} Jan 06 08:30:23 crc kubenswrapper[4784]: I0106 08:30:23.445137 4784 generic.go:334] "Generic (PLEG): container finished" podID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerID="af6c59ff88de5778be22030ba89d277867d5087fe8ed09edfc7a18e5aa079548" exitCode=0 Jan 06 08:30:23 crc kubenswrapper[4784]: I0106 08:30:23.445238 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerDied","Data":"af6c59ff88de5778be22030ba89d277867d5087fe8ed09edfc7a18e5aa079548"} Jan 06 08:30:24 crc kubenswrapper[4784]: I0106 08:30:24.455151 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" 
event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerStarted","Data":"1641bb16d77e9ff2f93b4e88b9310b248fb6197270b27cd23c689b65a871ebca"} Jan 06 08:30:24 crc kubenswrapper[4784]: I0106 08:30:24.485716 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2f2z6" podStartSLOduration=2.007158891 podStartE2EDuration="4.485690069s" podCreationTimestamp="2026-01-06 08:30:20 +0000 UTC" firstStartedPulling="2026-01-06 08:30:21.428599923 +0000 UTC m=+923.474772770" lastFinishedPulling="2026-01-06 08:30:23.907131071 +0000 UTC m=+925.953303948" observedRunningTime="2026-01-06 08:30:24.479344632 +0000 UTC m=+926.525517499" watchObservedRunningTime="2026-01-06 08:30:24.485690069 +0000 UTC m=+926.531862946" Jan 06 08:30:29 crc kubenswrapper[4784]: I0106 08:30:29.929756 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl"] Jan 06 08:30:29 crc kubenswrapper[4784]: I0106 08:30:29.932204 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:29 crc kubenswrapper[4784]: I0106 08:30:29.934602 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-52sfn" Jan 06 08:30:29 crc kubenswrapper[4784]: I0106 08:30:29.939064 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl"] Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.118081 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.118168 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww9b7\" (UniqueName: \"kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.118226 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.219506 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 
08:30:30.219656 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.219719 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww9b7\" (UniqueName: \"kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.220405 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.220444 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.251533 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww9b7\" (UniqueName: \"kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7\") pod \"78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.253606 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.641156 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.641659 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.694166 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:30 crc kubenswrapper[4784]: I0106 08:30:30.742271 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl"] Jan 06 08:30:31 crc kubenswrapper[4784]: I0106 08:30:31.515011 4784 generic.go:334] "Generic (PLEG): container finished" podID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerID="36ffbf74303f5a2211675dbd3197e238c0916eefacaca4a5902e9c8ee946a8e0" exitCode=0 Jan 06 08:30:31 crc kubenswrapper[4784]: I0106 08:30:31.515094 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerDied","Data":"36ffbf74303f5a2211675dbd3197e238c0916eefacaca4a5902e9c8ee946a8e0"} Jan 06 08:30:31 crc kubenswrapper[4784]: I0106 08:30:31.516902 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerStarted","Data":"e2d1b9e782c0b68dd8aae5682c062524f0f7f980078948f1459725804bc60009"} Jan 06 08:30:31 crc kubenswrapper[4784]: I0106 08:30:31.568473 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:33 crc kubenswrapper[4784]: I0106 08:30:33.490121 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:33 crc kubenswrapper[4784]: I0106 08:30:33.533620 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2f2z6" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="registry-server" containerID="cri-o://1641bb16d77e9ff2f93b4e88b9310b248fb6197270b27cd23c689b65a871ebca" gracePeriod=2 Jan 06 08:30:33 crc kubenswrapper[4784]: I0106 08:30:33.533768 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerStarted","Data":"c65b367fb9483484b4f7936fc8c257e3fba31877cee5bc9e400c24eac23cc3be"} Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.542578 4784 generic.go:334] "Generic (PLEG): container finished" podID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerID="c65b367fb9483484b4f7936fc8c257e3fba31877cee5bc9e400c24eac23cc3be" exitCode=0 Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.542658 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" 
event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerDied","Data":"c65b367fb9483484b4f7936fc8c257e3fba31877cee5bc9e400c24eac23cc3be"} Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.548819 4784 generic.go:334] "Generic (PLEG): container finished" podID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerID="1641bb16d77e9ff2f93b4e88b9310b248fb6197270b27cd23c689b65a871ebca" exitCode=0 Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.548900 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerDied","Data":"1641bb16d77e9ff2f93b4e88b9310b248fb6197270b27cd23c689b65a871ebca"} Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.548936 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2f2z6" event={"ID":"21c386b9-c1a8-4783-804a-b6345e08e8ad","Type":"ContainerDied","Data":"c47d61cf7085e18c0a8b3e17eba87e9a5a93296464e1e9f1a620d76afd690727"} Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.548948 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c47d61cf7085e18c0a8b3e17eba87e9a5a93296464e1e9f1a620d76afd690727" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.568929 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.686421 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mggws\" (UniqueName: \"kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws\") pod \"21c386b9-c1a8-4783-804a-b6345e08e8ad\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.686538 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content\") pod \"21c386b9-c1a8-4783-804a-b6345e08e8ad\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.686871 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities\") pod \"21c386b9-c1a8-4783-804a-b6345e08e8ad\" (UID: \"21c386b9-c1a8-4783-804a-b6345e08e8ad\") " Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.691070 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities" (OuterVolumeSpecName: "utilities") pod "21c386b9-c1a8-4783-804a-b6345e08e8ad" (UID: "21c386b9-c1a8-4783-804a-b6345e08e8ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.696614 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws" (OuterVolumeSpecName: "kube-api-access-mggws") pod "21c386b9-c1a8-4783-804a-b6345e08e8ad" (UID: "21c386b9-c1a8-4783-804a-b6345e08e8ad"). InnerVolumeSpecName "kube-api-access-mggws". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.759330 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "21c386b9-c1a8-4783-804a-b6345e08e8ad" (UID: "21c386b9-c1a8-4783-804a-b6345e08e8ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.788005 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mggws\" (UniqueName: \"kubernetes.io/projected/21c386b9-c1a8-4783-804a-b6345e08e8ad-kube-api-access-mggws\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.788056 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:34 crc kubenswrapper[4784]: I0106 08:30:34.788070 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21c386b9-c1a8-4783-804a-b6345e08e8ad-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:35 crc kubenswrapper[4784]: I0106 08:30:35.581519 4784 generic.go:334] "Generic (PLEG): container finished" podID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerID="d6840324a2894d10e3e7575d6d60cb99a0d60379b5ce21245ed96405e1f02e0a" exitCode=0 Jan 06 08:30:35 crc kubenswrapper[4784]: I0106 08:30:35.581769 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2f2z6" Jan 06 08:30:35 crc kubenswrapper[4784]: I0106 08:30:35.581824 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerDied","Data":"d6840324a2894d10e3e7575d6d60cb99a0d60379b5ce21245ed96405e1f02e0a"} Jan 06 08:30:35 crc kubenswrapper[4784]: I0106 08:30:35.653045 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:35 crc kubenswrapper[4784]: I0106 08:30:35.658849 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2f2z6"] Jan 06 08:30:36 crc kubenswrapper[4784]: I0106 08:30:36.325464 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" path="/var/lib/kubelet/pods/21c386b9-c1a8-4783-804a-b6345e08e8ad/volumes" Jan 06 08:30:36 crc kubenswrapper[4784]: I0106 08:30:36.935123 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.127482 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util\") pod \"1551dd5f-2b41-400e-a9ce-25df87d7935b\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.127716 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ww9b7\" (UniqueName: \"kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7\") pod \"1551dd5f-2b41-400e-a9ce-25df87d7935b\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.127770 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle\") pod \"1551dd5f-2b41-400e-a9ce-25df87d7935b\" (UID: \"1551dd5f-2b41-400e-a9ce-25df87d7935b\") " Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.128635 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle" (OuterVolumeSpecName: "bundle") pod "1551dd5f-2b41-400e-a9ce-25df87d7935b" (UID: "1551dd5f-2b41-400e-a9ce-25df87d7935b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.137888 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util" (OuterVolumeSpecName: "util") pod "1551dd5f-2b41-400e-a9ce-25df87d7935b" (UID: "1551dd5f-2b41-400e-a9ce-25df87d7935b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.148023 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7" (OuterVolumeSpecName: "kube-api-access-ww9b7") pod "1551dd5f-2b41-400e-a9ce-25df87d7935b" (UID: "1551dd5f-2b41-400e-a9ce-25df87d7935b"). InnerVolumeSpecName "kube-api-access-ww9b7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.229888 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ww9b7\" (UniqueName: \"kubernetes.io/projected/1551dd5f-2b41-400e-a9ce-25df87d7935b-kube-api-access-ww9b7\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.229934 4784 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.229947 4784 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1551dd5f-2b41-400e-a9ce-25df87d7935b-util\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.297722 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298205 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="extract" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298230 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="extract" Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298247 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="extract-content" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298255 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="extract-content" Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298265 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="extract-utilities" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298273 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="extract-utilities" Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298295 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="registry-server" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298303 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="registry-server" Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298312 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="pull" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298319 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="pull" Jan 06 08:30:37 crc kubenswrapper[4784]: E0106 08:30:37.298334 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="util" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298340 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="util" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.298459 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="21c386b9-c1a8-4783-804a-b6345e08e8ad" containerName="registry-server" Jan 06 08:30:37 
crc kubenswrapper[4784]: I0106 08:30:37.298469 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1551dd5f-2b41-400e-a9ce-25df87d7935b" containerName="extract" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.299841 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.319657 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.332460 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.332631 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.332792 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgpft\" (UniqueName: \"kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.433413 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgpft\" (UniqueName: \"kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.433896 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.434059 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.434604 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.434664 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.452845 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgpft\" (UniqueName: \"kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft\") pod \"redhat-marketplace-clwtw\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.599556 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" event={"ID":"1551dd5f-2b41-400e-a9ce-25df87d7935b","Type":"ContainerDied","Data":"e2d1b9e782c0b68dd8aae5682c062524f0f7f980078948f1459725804bc60009"} Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.599986 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2d1b9e782c0b68dd8aae5682c062524f0f7f980078948f1459725804bc60009" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.599634 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.643349 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:37 crc kubenswrapper[4784]: I0106 08:30:37.926438 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.609567 4784 generic.go:334] "Generic (PLEG): container finished" podID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerID="325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54" exitCode=0 Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.609657 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerDied","Data":"325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54"} Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.609852 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerStarted","Data":"21a52b8edf324bfd160aaac470443c276a388b30efefe99a1f249d5f197d8a6b"} Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.691921 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.693561 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.716844 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.754028 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.754089 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brnwl\" (UniqueName: \"kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.754139 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.854568 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.854629 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brnwl\" (UniqueName: \"kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.854678 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.855225 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.855293 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:38 crc kubenswrapper[4784]: I0106 08:30:38.912556 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-brnwl\" (UniqueName: \"kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl\") pod \"certified-operators-z7qsg\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.019712 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.297008 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:39 crc kubenswrapper[4784]: W0106 08:30:39.307963 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39f91e81_a169_4045_acdd_ab2b5b6c7b70.slice/crio-5291c9e8f15b4b4d770872b66c3b1bc5ccbdffe40a3a91ea3e352c526a707056 WatchSource:0}: Error finding container 5291c9e8f15b4b4d770872b66c3b1bc5ccbdffe40a3a91ea3e352c526a707056: Status 404 returned error can't find the container with id 5291c9e8f15b4b4d770872b66c3b1bc5ccbdffe40a3a91ea3e352c526a707056 Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.618595 4784 generic.go:334] "Generic (PLEG): container finished" podID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerID="e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa" exitCode=0 Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.618684 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerDied","Data":"e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa"} Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.621597 4784 generic.go:334] "Generic (PLEG): container finished" podID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerID="344c0dd81a38a8d1514b4f0f6a0b2fd3e49e8ee2d45e6c89801ec8e1283d43c0" exitCode=0 Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.621636 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerDied","Data":"344c0dd81a38a8d1514b4f0f6a0b2fd3e49e8ee2d45e6c89801ec8e1283d43c0"} Jan 06 08:30:39 crc kubenswrapper[4784]: I0106 08:30:39.621663 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerStarted","Data":"5291c9e8f15b4b4d770872b66c3b1bc5ccbdffe40a3a91ea3e352c526a707056"} Jan 06 08:30:40 crc kubenswrapper[4784]: I0106 08:30:40.632579 4784 generic.go:334] "Generic (PLEG): container finished" podID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerID="c4fc5a28176a4448df0b7ff71b01c32b4bfbc4f21d93d989fc94855e373ad36a" exitCode=0 Jan 06 08:30:40 crc kubenswrapper[4784]: I0106 08:30:40.632763 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerDied","Data":"c4fc5a28176a4448df0b7ff71b01c32b4bfbc4f21d93d989fc94855e373ad36a"} Jan 06 08:30:40 crc kubenswrapper[4784]: I0106 08:30:40.638222 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" 
event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerStarted","Data":"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972"} Jan 06 08:30:40 crc kubenswrapper[4784]: I0106 08:30:40.724385 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-clwtw" podStartSLOduration=2.247932591 podStartE2EDuration="3.724361804s" podCreationTimestamp="2026-01-06 08:30:37 +0000 UTC" firstStartedPulling="2026-01-06 08:30:38.61202541 +0000 UTC m=+940.658198247" lastFinishedPulling="2026-01-06 08:30:40.088454623 +0000 UTC m=+942.134627460" observedRunningTime="2026-01-06 08:30:40.720184692 +0000 UTC m=+942.766357519" watchObservedRunningTime="2026-01-06 08:30:40.724361804 +0000 UTC m=+942.770534641" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.215922 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg"] Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.216711 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.219210 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-74dwm" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.256559 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg"] Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.388331 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwmtp\" (UniqueName: \"kubernetes.io/projected/90e35bd2-b005-4bb0-a024-9e213e8ba6ab-kube-api-access-hwmtp\") pod \"openstack-operator-controller-operator-7746f6c5b8-f94bg\" (UID: \"90e35bd2-b005-4bb0-a024-9e213e8ba6ab\") " pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.490233 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwmtp\" (UniqueName: \"kubernetes.io/projected/90e35bd2-b005-4bb0-a024-9e213e8ba6ab-kube-api-access-hwmtp\") pod \"openstack-operator-controller-operator-7746f6c5b8-f94bg\" (UID: \"90e35bd2-b005-4bb0-a024-9e213e8ba6ab\") " pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.512700 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwmtp\" (UniqueName: \"kubernetes.io/projected/90e35bd2-b005-4bb0-a024-9e213e8ba6ab-kube-api-access-hwmtp\") pod \"openstack-operator-controller-operator-7746f6c5b8-f94bg\" (UID: \"90e35bd2-b005-4bb0-a024-9e213e8ba6ab\") " pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.557954 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.655039 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerStarted","Data":"6178aeecd34bec0077052dcc0a9dc2377caa9d0293db2ac908dbd67365d97315"} Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.685369 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z7qsg" podStartSLOduration=2.183188794 podStartE2EDuration="3.685349666s" podCreationTimestamp="2026-01-06 08:30:38 +0000 UTC" firstStartedPulling="2026-01-06 08:30:39.62314849 +0000 UTC m=+941.669321327" lastFinishedPulling="2026-01-06 08:30:41.125309322 +0000 UTC m=+943.171482199" observedRunningTime="2026-01-06 08:30:41.680083 +0000 UTC m=+943.726255867" watchObservedRunningTime="2026-01-06 08:30:41.685349666 +0000 UTC m=+943.731522503" Jan 06 08:30:41 crc kubenswrapper[4784]: I0106 08:30:41.821330 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg"] Jan 06 08:30:42 crc kubenswrapper[4784]: I0106 08:30:42.663465 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" event={"ID":"90e35bd2-b005-4bb0-a024-9e213e8ba6ab","Type":"ContainerStarted","Data":"8c95997de16d049dd57827983569572182071d4faec6aae196e2a0cee075c5e1"} Jan 06 08:30:44 crc kubenswrapper[4784]: I0106 08:30:44.356154 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:30:44 crc kubenswrapper[4784]: I0106 08:30:44.356231 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:30:47 crc kubenswrapper[4784]: I0106 08:30:47.644273 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:47 crc kubenswrapper[4784]: I0106 08:30:47.644703 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:47 crc kubenswrapper[4784]: I0106 08:30:47.698132 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:47 crc kubenswrapper[4784]: I0106 08:30:47.766676 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:49 crc kubenswrapper[4784]: I0106 08:30:49.021048 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:49 crc kubenswrapper[4784]: I0106 08:30:49.021491 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:49 crc kubenswrapper[4784]: I0106 
08:30:49.092780 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:49 crc kubenswrapper[4784]: I0106 08:30:49.796326 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:50 crc kubenswrapper[4784]: I0106 08:30:50.483970 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:50 crc kubenswrapper[4784]: I0106 08:30:50.742603 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-clwtw" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="registry-server" containerID="cri-o://c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972" gracePeriod=2 Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.736442 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.756290 4784 generic.go:334] "Generic (PLEG): container finished" podID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerID="c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972" exitCode=0 Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.756407 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerDied","Data":"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972"} Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.756456 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwtw" event={"ID":"d17eb6f2-2eee-452f-a311-f5f266264bfb","Type":"ContainerDied","Data":"21a52b8edf324bfd160aaac470443c276a388b30efefe99a1f249d5f197d8a6b"} Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.756482 4784 scope.go:117] "RemoveContainer" containerID="c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.756528 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwtw" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.768809 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" event={"ID":"90e35bd2-b005-4bb0-a024-9e213e8ba6ab","Type":"ContainerStarted","Data":"7ad86045b378789d6927c6cbace90e48101a99245d61198c668580eb12d6f053"} Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.770528 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.794319 4784 scope.go:117] "RemoveContainer" containerID="e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.808355 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" podStartSLOduration=2.006695919 podStartE2EDuration="10.808337489s" podCreationTimestamp="2026-01-06 08:30:41 +0000 UTC" firstStartedPulling="2026-01-06 08:30:41.836463042 +0000 UTC m=+943.882635879" lastFinishedPulling="2026-01-06 08:30:50.638104622 +0000 UTC m=+952.684277449" observedRunningTime="2026-01-06 08:30:51.804394624 +0000 UTC m=+953.850567461" watchObservedRunningTime="2026-01-06 08:30:51.808337489 +0000 UTC m=+953.854510326" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.842845 4784 scope.go:117] "RemoveContainer" containerID="325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.857032 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgpft\" (UniqueName: \"kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft\") pod \"d17eb6f2-2eee-452f-a311-f5f266264bfb\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.857151 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities\") pod \"d17eb6f2-2eee-452f-a311-f5f266264bfb\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.857215 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content\") pod \"d17eb6f2-2eee-452f-a311-f5f266264bfb\" (UID: \"d17eb6f2-2eee-452f-a311-f5f266264bfb\") " Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.866436 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities" (OuterVolumeSpecName: "utilities") pod "d17eb6f2-2eee-452f-a311-f5f266264bfb" (UID: "d17eb6f2-2eee-452f-a311-f5f266264bfb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.867078 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft" (OuterVolumeSpecName: "kube-api-access-zgpft") pod "d17eb6f2-2eee-452f-a311-f5f266264bfb" (UID: "d17eb6f2-2eee-452f-a311-f5f266264bfb"). 
InnerVolumeSpecName "kube-api-access-zgpft". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.871497 4784 scope.go:117] "RemoveContainer" containerID="c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972" Jan 06 08:30:51 crc kubenswrapper[4784]: E0106 08:30:51.878825 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972\": container with ID starting with c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972 not found: ID does not exist" containerID="c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.878893 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972"} err="failed to get container status \"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972\": rpc error: code = NotFound desc = could not find container \"c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972\": container with ID starting with c9725b4c4a9ac96aca5e7a9dea6c00c186f161f92f7510bae9830f5973b51972 not found: ID does not exist" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.878932 4784 scope.go:117] "RemoveContainer" containerID="e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa" Jan 06 08:30:51 crc kubenswrapper[4784]: E0106 08:30:51.879880 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa\": container with ID starting with e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa not found: ID does not exist" containerID="e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.879949 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa"} err="failed to get container status \"e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa\": rpc error: code = NotFound desc = could not find container \"e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa\": container with ID starting with e008df8bb6bc5bb0e504e6082342682db7fc4b5ebab554ef0a5c56b796522baa not found: ID does not exist" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.879986 4784 scope.go:117] "RemoveContainer" containerID="325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54" Jan 06 08:30:51 crc kubenswrapper[4784]: E0106 08:30:51.885136 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54\": container with ID starting with 325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54 not found: ID does not exist" containerID="325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.885168 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54"} err="failed to get container status 
\"325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54\": rpc error: code = NotFound desc = could not find container \"325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54\": container with ID starting with 325fdde5450c71e6aefd70fc4730688c0147c5fe2bde712b0aa1d23dad4d6a54 not found: ID does not exist" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.889343 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d17eb6f2-2eee-452f-a311-f5f266264bfb" (UID: "d17eb6f2-2eee-452f-a311-f5f266264bfb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.958872 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.958907 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d17eb6f2-2eee-452f-a311-f5f266264bfb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:51 crc kubenswrapper[4784]: I0106 08:30:51.958924 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgpft\" (UniqueName: \"kubernetes.io/projected/d17eb6f2-2eee-452f-a311-f5f266264bfb-kube-api-access-zgpft\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.094147 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.097933 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwtw"] Jan 06 08:30:52 crc kubenswrapper[4784]: E0106 08:30:52.170267 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd17eb6f2_2eee_452f_a311_f5f266264bfb.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd17eb6f2_2eee_452f_a311_f5f266264bfb.slice/crio-21a52b8edf324bfd160aaac470443c276a388b30efefe99a1f249d5f197d8a6b\": RecentStats: unable to find data in memory cache]" Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.326104 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" path="/var/lib/kubelet/pods/d17eb6f2-2eee-452f-a311-f5f266264bfb/volumes" Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.487180 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.487582 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z7qsg" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="registry-server" containerID="cri-o://6178aeecd34bec0077052dcc0a9dc2377caa9d0293db2ac908dbd67365d97315" gracePeriod=2 Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.797507 4784 generic.go:334] "Generic (PLEG): container finished" podID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerID="6178aeecd34bec0077052dcc0a9dc2377caa9d0293db2ac908dbd67365d97315" exitCode=0 Jan 06 
08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.797568 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerDied","Data":"6178aeecd34bec0077052dcc0a9dc2377caa9d0293db2ac908dbd67365d97315"} Jan 06 08:30:52 crc kubenswrapper[4784]: I0106 08:30:52.958938 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.076533 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities\") pod \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.076772 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brnwl\" (UniqueName: \"kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl\") pod \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.076879 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content\") pod \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\" (UID: \"39f91e81-a169-4045-acdd-ab2b5b6c7b70\") " Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.077519 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities" (OuterVolumeSpecName: "utilities") pod "39f91e81-a169-4045-acdd-ab2b5b6c7b70" (UID: "39f91e81-a169-4045-acdd-ab2b5b6c7b70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.083481 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl" (OuterVolumeSpecName: "kube-api-access-brnwl") pod "39f91e81-a169-4045-acdd-ab2b5b6c7b70" (UID: "39f91e81-a169-4045-acdd-ab2b5b6c7b70"). InnerVolumeSpecName "kube-api-access-brnwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.130849 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39f91e81-a169-4045-acdd-ab2b5b6c7b70" (UID: "39f91e81-a169-4045-acdd-ab2b5b6c7b70"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.178406 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.178458 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brnwl\" (UniqueName: \"kubernetes.io/projected/39f91e81-a169-4045-acdd-ab2b5b6c7b70-kube-api-access-brnwl\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.178501 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39f91e81-a169-4045-acdd-ab2b5b6c7b70-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.811971 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z7qsg" event={"ID":"39f91e81-a169-4045-acdd-ab2b5b6c7b70","Type":"ContainerDied","Data":"5291c9e8f15b4b4d770872b66c3b1bc5ccbdffe40a3a91ea3e352c526a707056"} Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.812024 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z7qsg" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.812060 4784 scope.go:117] "RemoveContainer" containerID="6178aeecd34bec0077052dcc0a9dc2377caa9d0293db2ac908dbd67365d97315" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.841724 4784 scope.go:117] "RemoveContainer" containerID="c4fc5a28176a4448df0b7ff71b01c32b4bfbc4f21d93d989fc94855e373ad36a" Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.854813 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.858505 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z7qsg"] Jan 06 08:30:53 crc kubenswrapper[4784]: I0106 08:30:53.869515 4784 scope.go:117] "RemoveContainer" containerID="344c0dd81a38a8d1514b4f0f6a0b2fd3e49e8ee2d45e6c89801ec8e1283d43c0" Jan 06 08:30:54 crc kubenswrapper[4784]: I0106 08:30:54.328243 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" path="/var/lib/kubelet/pods/39f91e81-a169-4045-acdd-ab2b5b6c7b70/volumes" Jan 06 08:31:01 crc kubenswrapper[4784]: I0106 08:31:01.560357 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7746f6c5b8-f94bg" Jan 06 08:31:14 crc kubenswrapper[4784]: I0106 08:31:14.351428 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:31:14 crc kubenswrapper[4784]: I0106 08:31:14.352087 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.645213 4784 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p"] Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.645939 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.645951 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.645962 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="extract-content" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.645968 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="extract-content" Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.645979 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.645985 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.645995 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="extract-utilities" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646001 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="extract-utilities" Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.646010 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="extract-utilities" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646019 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="extract-utilities" Jan 06 08:31:29 crc kubenswrapper[4784]: E0106 08:31:29.646027 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="extract-content" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646033 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="extract-content" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646154 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="39f91e81-a169-4045-acdd-ab2b5b6c7b70" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646168 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="d17eb6f2-2eee-452f-a311-f5f266264bfb" containerName="registry-server" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.646575 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.648366 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-69sb5" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.649425 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.650160 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.654219 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-72wjc" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.671611 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.701587 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.716716 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.731806 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.735948 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-666tx\" (UniqueName: \"kubernetes.io/projected/1a45b65a-e45a-4a0e-be79-81b1d9d60c35-kube-api-access-666tx\") pod \"barbican-operator-controller-manager-f6f74d6db-zvg9p\" (UID: \"1a45b65a-e45a-4a0e-be79-81b1d9d60c35\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.737145 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6dms\" (UniqueName: \"kubernetes.io/projected/07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7-kube-api-access-v6dms\") pod \"cinder-operator-controller-manager-78979fc445-9c6jw\" (UID: \"07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.747249 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-nx5kd" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.786962 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.803081 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.806832 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qtvlq" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.814668 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.848131 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-666tx\" (UniqueName: \"kubernetes.io/projected/1a45b65a-e45a-4a0e-be79-81b1d9d60c35-kube-api-access-666tx\") pod \"barbican-operator-controller-manager-f6f74d6db-zvg9p\" (UID: \"1a45b65a-e45a-4a0e-be79-81b1d9d60c35\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.848222 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6dms\" (UniqueName: \"kubernetes.io/projected/07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7-kube-api-access-v6dms\") pod \"cinder-operator-controller-manager-78979fc445-9c6jw\" (UID: \"07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.848283 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7x6b\" (UniqueName: \"kubernetes.io/projected/9b052e6e-0823-402f-8e8b-bbd48d0ea36d-kube-api-access-b7x6b\") pod \"designate-operator-controller-manager-66f8b87655-k79sj\" (UID: \"9b052e6e-0823-402f-8e8b-bbd48d0ea36d\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.856757 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.870778 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.872009 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.876826 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-sgqp4" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.898816 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-666tx\" (UniqueName: \"kubernetes.io/projected/1a45b65a-e45a-4a0e-be79-81b1d9d60c35-kube-api-access-666tx\") pod \"barbican-operator-controller-manager-f6f74d6db-zvg9p\" (UID: \"1a45b65a-e45a-4a0e-be79-81b1d9d60c35\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.903854 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6dms\" (UniqueName: \"kubernetes.io/projected/07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7-kube-api-access-v6dms\") pod \"cinder-operator-controller-manager-78979fc445-9c6jw\" (UID: \"07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.903944 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.919082 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.919966 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.928288 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-4spsw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.929605 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.930414 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.932421 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.934355 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-ltbnl" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.949030 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbr5j\" (UniqueName: \"kubernetes.io/projected/bd426be0-7b17-4514-b36a-8e25c067a2e9-kube-api-access-vbr5j\") pod \"glance-operator-controller-manager-7b549fc966-7krlv\" (UID: \"bd426be0-7b17-4514-b36a-8e25c067a2e9\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.949083 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4jlc\" (UniqueName: \"kubernetes.io/projected/fc26f7fb-7146-41b2-b288-96c827f08dd4-kube-api-access-v4jlc\") pod \"heat-operator-controller-manager-658dd65b86-nkztz\" (UID: \"fc26f7fb-7146-41b2-b288-96c827f08dd4\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.949136 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7x6b\" (UniqueName: \"kubernetes.io/projected/9b052e6e-0823-402f-8e8b-bbd48d0ea36d-kube-api-access-b7x6b\") pod \"designate-operator-controller-manager-66f8b87655-k79sj\" (UID: \"9b052e6e-0823-402f-8e8b-bbd48d0ea36d\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.959800 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.966387 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z"] Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.967702 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.968801 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.971136 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-5kk4v" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.978939 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:31:29 crc kubenswrapper[4784]: I0106 08:31:29.995695 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.002867 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7x6b\" (UniqueName: \"kubernetes.io/projected/9b052e6e-0823-402f-8e8b-bbd48d0ea36d-kube-api-access-b7x6b\") pod \"designate-operator-controller-manager-66f8b87655-k79sj\" (UID: \"9b052e6e-0823-402f-8e8b-bbd48d0ea36d\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.013374 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.014730 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.017809 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-mh6ls" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.024133 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.040124 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.050688 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.051049 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-x9xqf" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052222 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052276 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96hjx\" (UniqueName: \"kubernetes.io/projected/edc01c87-757b-47c7-b3cb-bfbb7ec71797-kube-api-access-96hjx\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052349 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn5bp\" (UniqueName: \"kubernetes.io/projected/6da1078f-40ff-46ae-af83-b4befd08da78-kube-api-access-tn5bp\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-lw8c8\" (UID: \"6da1078f-40ff-46ae-af83-b4befd08da78\") " 
pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052382 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwt5k\" (UniqueName: \"kubernetes.io/projected/74544171-df6c-4a36-8955-3b11da058598-kube-api-access-gwt5k\") pod \"ironic-operator-controller-manager-f99f54bc8-ntw8z\" (UID: \"74544171-df6c-4a36-8955-3b11da058598\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052428 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbr5j\" (UniqueName: \"kubernetes.io/projected/bd426be0-7b17-4514-b36a-8e25c067a2e9-kube-api-access-vbr5j\") pod \"glance-operator-controller-manager-7b549fc966-7krlv\" (UID: \"bd426be0-7b17-4514-b36a-8e25c067a2e9\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.052461 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4jlc\" (UniqueName: \"kubernetes.io/projected/fc26f7fb-7146-41b2-b288-96c827f08dd4-kube-api-access-v4jlc\") pod \"heat-operator-controller-manager-658dd65b86-nkztz\" (UID: \"fc26f7fb-7146-41b2-b288-96c827f08dd4\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.067848 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.082702 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4jlc\" (UniqueName: \"kubernetes.io/projected/fc26f7fb-7146-41b2-b288-96c827f08dd4-kube-api-access-v4jlc\") pod \"heat-operator-controller-manager-658dd65b86-nkztz\" (UID: \"fc26f7fb-7146-41b2-b288-96c827f08dd4\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.090441 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbr5j\" (UniqueName: \"kubernetes.io/projected/bd426be0-7b17-4514-b36a-8e25c067a2e9-kube-api-access-vbr5j\") pod \"glance-operator-controller-manager-7b549fc966-7krlv\" (UID: \"bd426be0-7b17-4514-b36a-8e25c067a2e9\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.090530 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.091648 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.100135 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.101312 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-wcfnt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.101693 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.131649 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.143625 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.144964 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.148795 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-fswjw" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.153583 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.154212 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn5bp\" (UniqueName: \"kubernetes.io/projected/6da1078f-40ff-46ae-af83-b4befd08da78-kube-api-access-tn5bp\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-lw8c8\" (UID: \"6da1078f-40ff-46ae-af83-b4befd08da78\") " pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.154284 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt5k\" (UniqueName: \"kubernetes.io/projected/74544171-df6c-4a36-8955-3b11da058598-kube-api-access-gwt5k\") pod \"ironic-operator-controller-manager-f99f54bc8-ntw8z\" (UID: \"74544171-df6c-4a36-8955-3b11da058598\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.154423 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cc4z\" (UniqueName: \"kubernetes.io/projected/d74a08a3-5b46-47c3-8c74-d29e532e4df3-kube-api-access-4cc4z\") pod \"mariadb-operator-controller-manager-7b88bfc995-chgf2\" (UID: \"d74a08a3-5b46-47c3-8c74-d29e532e4df3\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.154487 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx9qm\" (UniqueName: \"kubernetes.io/projected/4a449bab-eabb-457b-94a3-c2a5bfd9827c-kube-api-access-cx9qm\") pod \"keystone-operator-controller-manager-568985c78-q4rwb\" (UID: \"4a449bab-eabb-457b-94a3-c2a5bfd9827c\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.154596 4784 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.154768 4784 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.154836 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert podName:edc01c87-757b-47c7-b3cb-bfbb7ec71797 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:30.654814146 +0000 UTC m=+992.700986983 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert") pod "infra-operator-controller-manager-6d99759cf-tqhzj" (UID: "edc01c87-757b-47c7-b3cb-bfbb7ec71797") : secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.157123 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dg44\" (UniqueName: \"kubernetes.io/projected/1c68028b-8461-491c-bf85-78776c51b77d-kube-api-access-6dg44\") pod \"manila-operator-controller-manager-598945d5b8-s6sv6\" (UID: \"1c68028b-8461-491c-bf85-78776c51b77d\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.157196 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96hjx\" (UniqueName: \"kubernetes.io/projected/edc01c87-757b-47c7-b3cb-bfbb7ec71797-kube-api-access-96hjx\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.172265 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.172917 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwt5k\" (UniqueName: \"kubernetes.io/projected/74544171-df6c-4a36-8955-3b11da058598-kube-api-access-gwt5k\") pod \"ironic-operator-controller-manager-f99f54bc8-ntw8z\" (UID: \"74544171-df6c-4a36-8955-3b11da058598\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.179730 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn5bp\" (UniqueName: \"kubernetes.io/projected/6da1078f-40ff-46ae-af83-b4befd08da78-kube-api-access-tn5bp\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-lw8c8\" (UID: \"6da1078f-40ff-46ae-af83-b4befd08da78\") " pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.179996 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96hjx\" (UniqueName: \"kubernetes.io/projected/edc01c87-757b-47c7-b3cb-bfbb7ec71797-kube-api-access-96hjx\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") 
" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.195712 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.196747 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.200909 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.202117 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.209112 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-948v8" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.209410 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-47dsf" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.234874 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.247713 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.248106 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.262891 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cc4z\" (UniqueName: \"kubernetes.io/projected/d74a08a3-5b46-47c3-8c74-d29e532e4df3-kube-api-access-4cc4z\") pod \"mariadb-operator-controller-manager-7b88bfc995-chgf2\" (UID: \"d74a08a3-5b46-47c3-8c74-d29e532e4df3\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.262950 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx9qm\" (UniqueName: \"kubernetes.io/projected/4a449bab-eabb-457b-94a3-c2a5bfd9827c-kube-api-access-cx9qm\") pod \"keystone-operator-controller-manager-568985c78-q4rwb\" (UID: \"4a449bab-eabb-457b-94a3-c2a5bfd9827c\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.263022 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dg44\" (UniqueName: \"kubernetes.io/projected/1c68028b-8461-491c-bf85-78776c51b77d-kube-api-access-6dg44\") pod \"manila-operator-controller-manager-598945d5b8-s6sv6\" (UID: \"1c68028b-8461-491c-bf85-78776c51b77d\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.263059 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cxsq\" (UniqueName: \"kubernetes.io/projected/fba34147-9dd8-4e21-a886-9d1de06ef7ad-kube-api-access-5cxsq\") pod \"nova-operator-controller-manager-5fbbf8b6cc-pz42s\" (UID: \"fba34147-9dd8-4e21-a886-9d1de06ef7ad\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.263092 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-244c2\" (UniqueName: \"kubernetes.io/projected/754f68df-1972-403c-927d-898d74e9191a-kube-api-access-244c2\") pod \"neutron-operator-controller-manager-7cd87b778f-5t7n9\" (UID: \"754f68df-1972-403c-927d-898d74e9191a\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.263136 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9krc6\" (UniqueName: \"kubernetes.io/projected/adad8ba1-23e8-40d7-95b8-60cc2803765c-kube-api-access-9krc6\") pod \"octavia-operator-controller-manager-68c649d9d-9gkmh\" (UID: \"adad8ba1-23e8-40d7-95b8-60cc2803765c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.264409 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.267934 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.281959 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.282021 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.285948 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rb84m" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.292326 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cc4z\" (UniqueName: \"kubernetes.io/projected/d74a08a3-5b46-47c3-8c74-d29e532e4df3-kube-api-access-4cc4z\") pod \"mariadb-operator-controller-manager-7b88bfc995-chgf2\" (UID: \"d74a08a3-5b46-47c3-8c74-d29e532e4df3\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.296783 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dg44\" (UniqueName: \"kubernetes.io/projected/1c68028b-8461-491c-bf85-78776c51b77d-kube-api-access-6dg44\") pod \"manila-operator-controller-manager-598945d5b8-s6sv6\" (UID: \"1c68028b-8461-491c-bf85-78776c51b77d\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.323284 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx9qm\" (UniqueName: \"kubernetes.io/projected/4a449bab-eabb-457b-94a3-c2a5bfd9827c-kube-api-access-cx9qm\") pod \"keystone-operator-controller-manager-568985c78-q4rwb\" (UID: \"4a449bab-eabb-457b-94a3-c2a5bfd9827c\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.367227 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvw9t\" (UniqueName: \"kubernetes.io/projected/5303160b-4666-4800-8f86-72b1a823073d-kube-api-access-vvw9t\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.367327 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cxsq\" (UniqueName: \"kubernetes.io/projected/fba34147-9dd8-4e21-a886-9d1de06ef7ad-kube-api-access-5cxsq\") pod \"nova-operator-controller-manager-5fbbf8b6cc-pz42s\" (UID: \"fba34147-9dd8-4e21-a886-9d1de06ef7ad\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.367386 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-244c2\" (UniqueName: \"kubernetes.io/projected/754f68df-1972-403c-927d-898d74e9191a-kube-api-access-244c2\") pod \"neutron-operator-controller-manager-7cd87b778f-5t7n9\" (UID: \"754f68df-1972-403c-927d-898d74e9191a\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:31:30 crc 
kubenswrapper[4784]: I0106 08:31:30.367426 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9krc6\" (UniqueName: \"kubernetes.io/projected/adad8ba1-23e8-40d7-95b8-60cc2803765c-kube-api-access-9krc6\") pod \"octavia-operator-controller-manager-68c649d9d-9gkmh\" (UID: \"adad8ba1-23e8-40d7-95b8-60cc2803765c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.367460 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.408375 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.409181 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.410750 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.411257 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cxsq\" (UniqueName: \"kubernetes.io/projected/fba34147-9dd8-4e21-a886-9d1de06ef7ad-kube-api-access-5cxsq\") pod \"nova-operator-controller-manager-5fbbf8b6cc-pz42s\" (UID: \"fba34147-9dd8-4e21-a886-9d1de06ef7ad\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.416964 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-244c2\" (UniqueName: \"kubernetes.io/projected/754f68df-1972-403c-927d-898d74e9191a-kube-api-access-244c2\") pod \"neutron-operator-controller-manager-7cd87b778f-5t7n9\" (UID: \"754f68df-1972-403c-927d-898d74e9191a\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.417383 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-h4fsn" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.418314 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9krc6\" (UniqueName: \"kubernetes.io/projected/adad8ba1-23e8-40d7-95b8-60cc2803765c-kube-api-access-9krc6\") pod \"octavia-operator-controller-manager-68c649d9d-9gkmh\" (UID: \"adad8ba1-23e8-40d7-95b8-60cc2803765c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.428580 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.466586 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.476013 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.478310 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.486217 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.486281 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5qhg\" (UniqueName: \"kubernetes.io/projected/18c82783-5901-4ed0-ba58-23f5951cc7d1-kube-api-access-k5qhg\") pod \"ovn-operator-controller-manager-bf6d4f946-8lhqg\" (UID: \"18c82783-5901-4ed0-ba58-23f5951cc7d1\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.486358 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvw9t\" (UniqueName: \"kubernetes.io/projected/5303160b-4666-4800-8f86-72b1a823073d-kube-api-access-vvw9t\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.486835 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.486896 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:31:30.986875029 +0000 UTC m=+993.033047866 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.497736 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.526019 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.528906 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvw9t\" (UniqueName: \"kubernetes.io/projected/5303160b-4666-4800-8f86-72b1a823073d-kube-api-access-vvw9t\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.532382 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.533853 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.541912 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-946m7" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.548151 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.550817 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.568429 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.569762 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.576605 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-4mq8c" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.591006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5qhg\" (UniqueName: \"kubernetes.io/projected/18c82783-5901-4ed0-ba58-23f5951cc7d1-kube-api-access-k5qhg\") pod \"ovn-operator-controller-manager-bf6d4f946-8lhqg\" (UID: \"18c82783-5901-4ed0-ba58-23f5951cc7d1\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.600568 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.601996 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.619244 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.621493 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5qhg\" (UniqueName: \"kubernetes.io/projected/18c82783-5901-4ed0-ba58-23f5951cc7d1-kube-api-access-k5qhg\") pod \"ovn-operator-controller-manager-bf6d4f946-8lhqg\" (UID: \"18c82783-5901-4ed0-ba58-23f5951cc7d1\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.635200 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-sb4l4" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.637582 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.648649 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.667052 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.668207 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.674956 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xzwnx" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.697028 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5czf\" (UniqueName: \"kubernetes.io/projected/943574ea-c3fa-4541-8c22-d0b799c7497a-kube-api-access-g5czf\") pod \"swift-operator-controller-manager-bb586bbf4-rvf72\" (UID: \"943574ea-c3fa-4541-8c22-d0b799c7497a\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.697105 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdpvb\" (UniqueName: \"kubernetes.io/projected/8ef4d6f9-7a4c-4f30-984c-0a96f344a539-kube-api-access-zdpvb\") pod \"placement-operator-controller-manager-9b6f8f78c-8bdgx\" (UID: \"8ef4d6f9-7a4c-4f30-984c-0a96f344a539\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.697157 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.697240 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsdht\" (UniqueName: 
\"kubernetes.io/projected/4e27981f-a624-4063-b5af-a6ee3fd1c535-kube-api-access-tsdht\") pod \"telemetry-operator-controller-manager-68d988df55-r6lj7\" (UID: \"4e27981f-a624-4063-b5af-a6ee3fd1c535\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.697302 4784 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: E0106 08:31:30.697376 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert podName:edc01c87-757b-47c7-b3cb-bfbb7ec71797 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:31.697353215 +0000 UTC m=+993.743526052 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert") pod "infra-operator-controller-manager-6d99759cf-tqhzj" (UID: "edc01c87-757b-47c7-b3cb-bfbb7ec71797") : secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.712755 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.738042 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.773850 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.783255 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.785395 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-6b5mp" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.799738 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5czf\" (UniqueName: \"kubernetes.io/projected/943574ea-c3fa-4541-8c22-d0b799c7497a-kube-api-access-g5czf\") pod \"swift-operator-controller-manager-bb586bbf4-rvf72\" (UID: \"943574ea-c3fa-4541-8c22-d0b799c7497a\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.799809 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdpvb\" (UniqueName: \"kubernetes.io/projected/8ef4d6f9-7a4c-4f30-984c-0a96f344a539-kube-api-access-zdpvb\") pod \"placement-operator-controller-manager-9b6f8f78c-8bdgx\" (UID: \"8ef4d6f9-7a4c-4f30-984c-0a96f344a539\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.799863 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsdht\" (UniqueName: \"kubernetes.io/projected/4e27981f-a624-4063-b5af-a6ee3fd1c535-kube-api-access-tsdht\") pod \"telemetry-operator-controller-manager-68d988df55-r6lj7\" (UID: \"4e27981f-a624-4063-b5af-a6ee3fd1c535\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.799896 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kzkp\" (UniqueName: \"kubernetes.io/projected/184dc664-1b08-43fe-bc79-6b6bdcc2563f-kube-api-access-7kzkp\") pod \"test-operator-controller-manager-6c866cfdcb-qfc8h\" (UID: \"184dc664-1b08-43fe-bc79-6b6bdcc2563f\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.809617 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.832462 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5czf\" (UniqueName: \"kubernetes.io/projected/943574ea-c3fa-4541-8c22-d0b799c7497a-kube-api-access-g5czf\") pod \"swift-operator-controller-manager-bb586bbf4-rvf72\" (UID: \"943574ea-c3fa-4541-8c22-d0b799c7497a\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.852425 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsdht\" (UniqueName: \"kubernetes.io/projected/4e27981f-a624-4063-b5af-a6ee3fd1c535-kube-api-access-tsdht\") pod \"telemetry-operator-controller-manager-68d988df55-r6lj7\" (UID: \"4e27981f-a624-4063-b5af-a6ee3fd1c535\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.860401 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdpvb\" (UniqueName: \"kubernetes.io/projected/8ef4d6f9-7a4c-4f30-984c-0a96f344a539-kube-api-access-zdpvb\") pod 
\"placement-operator-controller-manager-9b6f8f78c-8bdgx\" (UID: \"8ef4d6f9-7a4c-4f30-984c-0a96f344a539\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.860483 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.861460 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.864926 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.865393 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-bgz56" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.865724 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.874116 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.886209 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.892516 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.893865 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.897189 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-j5g6r" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.897417 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.901804 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kzkp\" (UniqueName: \"kubernetes.io/projected/184dc664-1b08-43fe-bc79-6b6bdcc2563f-kube-api-access-7kzkp\") pod \"test-operator-controller-manager-6c866cfdcb-qfc8h\" (UID: \"184dc664-1b08-43fe-bc79-6b6bdcc2563f\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.901854 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvdmb\" (UniqueName: \"kubernetes.io/projected/fd92de86-98c0-4c11-9895-7b78b2aef05b-kube-api-access-mvdmb\") pod \"watcher-operator-controller-manager-9dbdf6486-wk4xc\" (UID: \"fd92de86-98c0-4c11-9895-7b78b2aef05b\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.914131 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.918055 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kzkp\" (UniqueName: \"kubernetes.io/projected/184dc664-1b08-43fe-bc79-6b6bdcc2563f-kube-api-access-7kzkp\") pod \"test-operator-controller-manager-6c866cfdcb-qfc8h\" (UID: \"184dc664-1b08-43fe-bc79-6b6bdcc2563f\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.925751 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.934674 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p"] Jan 06 08:31:30 crc kubenswrapper[4784]: I0106 08:31:30.952028 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:31:30 crc kubenswrapper[4784]: W0106 08:31:30.966183 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a45b65a_e45a_4a0e_be79_81b1d9d60c35.slice/crio-7f5bd3ebff699702033e041ce1d264e12e2dfe786201c7123724dad345975dab WatchSource:0}: Error finding container 7f5bd3ebff699702033e041ce1d264e12e2dfe786201c7123724dad345975dab: Status 404 returned error can't find the container with id 7f5bd3ebff699702033e041ce1d264e12e2dfe786201c7123724dad345975dab Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.003593 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.003885 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nxwz\" (UniqueName: \"kubernetes.io/projected/05e4a6ac-9ee3-4726-8709-945b37705103-kube-api-access-4nxwz\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.003929 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d645h\" (UniqueName: \"kubernetes.io/projected/76b61982-bafd-4f7a-a473-f5c0ef78ee74-kube-api-access-d645h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cqj6x\" (UID: \"76b61982-bafd-4f7a-a473-f5c0ef78ee74\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.003976 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.004026 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.004305 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.004333 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvdmb\" (UniqueName: \"kubernetes.io/projected/fd92de86-98c0-4c11-9895-7b78b2aef05b-kube-api-access-mvdmb\") pod \"watcher-operator-controller-manager-9dbdf6486-wk4xc\" (UID: \"fd92de86-98c0-4c11-9895-7b78b2aef05b\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.004419 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.004586 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:31:32.004537458 +0000 UTC m=+994.050710295 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.029383 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvdmb\" (UniqueName: \"kubernetes.io/projected/fd92de86-98c0-4c11-9895-7b78b2aef05b-kube-api-access-mvdmb\") pod \"watcher-operator-controller-manager-9dbdf6486-wk4xc\" (UID: \"fd92de86-98c0-4c11-9895-7b78b2aef05b\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.070981 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.106075 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.106180 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.106225 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nxwz\" (UniqueName: \"kubernetes.io/projected/05e4a6ac-9ee3-4726-8709-945b37705103-kube-api-access-4nxwz\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.106258 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d645h\" (UniqueName: \"kubernetes.io/projected/76b61982-bafd-4f7a-a473-f5c0ef78ee74-kube-api-access-d645h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cqj6x\" (UID: \"76b61982-bafd-4f7a-a473-f5c0ef78ee74\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.106263 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.106337 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:31.60631797 +0000 UTC m=+993.652490807 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.106613 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.106949 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:31.606934788 +0000 UTC m=+993.653107625 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.132219 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d645h\" (UniqueName: \"kubernetes.io/projected/76b61982-bafd-4f7a-a473-f5c0ef78ee74-kube-api-access-d645h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cqj6x\" (UID: \"76b61982-bafd-4f7a-a473-f5c0ef78ee74\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.144710 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nxwz\" (UniqueName: \"kubernetes.io/projected/05e4a6ac-9ee3-4726-8709-945b37705103-kube-api-access-4nxwz\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.151516 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.241617 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" event={"ID":"1a45b65a-e45a-4a0e-be79-81b1d9d60c35","Type":"ContainerStarted","Data":"7f5bd3ebff699702033e041ce1d264e12e2dfe786201c7123724dad345975dab"} Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.245898 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" event={"ID":"07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7","Type":"ContainerStarted","Data":"b9555cd1a3224d1a0c80c0d33746af05c819f02ea9ed6cb7e511596cdc4ea3d6"} Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.245992 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.256528 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.263088 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.268763 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.323090 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.490265 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.524704 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.548994 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.635228 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.635338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.635490 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.635561 4784 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:32.635526443 +0000 UTC m=+994.681699280 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.635907 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.635929 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:32.635922585 +0000 UTC m=+994.682095422 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.736649 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.736788 4784 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: E0106 08:31:31.736836 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert podName:edc01c87-757b-47c7-b3cb-bfbb7ec71797 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:33.736821822 +0000 UTC m=+995.782994659 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert") pod "infra-operator-controller-manager-6d99759cf-tqhzj" (UID: "edc01c87-757b-47c7-b3cb-bfbb7ec71797") : secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.847039 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.925197 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7"] Jan 06 08:31:31 crc kubenswrapper[4784]: I0106 08:31:31.957177 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2"] Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.001861 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72"] Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.013280 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h"] Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.020113 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s"] Jan 06 08:31:32 crc kubenswrapper[4784]: W0106 08:31:32.030733 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18c82783_5901_4ed0_ba58_23f5951cc7d1.slice/crio-a22235917ca9265db5d41529eab8aeab730d7bae448d885e695587c7d9235130 WatchSource:0}: Error finding container a22235917ca9265db5d41529eab8aeab730d7bae448d885e695587c7d9235130: Status 404 returned error can't find the container with id a22235917ca9265db5d41529eab8aeab730d7bae448d885e695587c7d9235130 Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.033509 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg"] Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.035867 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k5qhg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-bf6d4f946-8lhqg_openstack-operators(18c82783-5901-4ed0-ba58-23f5951cc7d1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.040913 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" podUID="18c82783-5901-4ed0-ba58-23f5951cc7d1" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.042961 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.043256 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.043315 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:31:34.043297615 +0000 UTC m=+996.089470452 (durationBeforeRetry 2s). 
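
The "ErrImagePull: pull QPS exceeded" errors here do not indicate a registry problem: the kubelet rate-limits image pulls via its registryPullQPS and registryBurst settings (defaults 5 pulls/s with a burst of 10), and launching a couple dozen operator pods at once exhausts the burst, so the excess pulls are rejected locally and retried under back-off. A rough token-bucket sketch of that limiting behavior — parameter names mirror the kubelet config fields, the implementation is illustrative:

    # Sketch: token-bucket rate limiter of the kind the kubelet applies to
    # image pulls (registryPullQPS / registryBurst). Illustrative only.
    import time

    class PullLimiter:
        def __init__(self, qps: float = 5.0, burst: int = 10):
            self.qps, self.burst = qps, burst
            self.tokens = float(burst)
            self.last = time.monotonic()

        def try_acquire(self) -> bool:
            now = time.monotonic()
            # refill tokens at `qps` per second, capped at `burst`
            self.tokens = min(self.burst, self.tokens + (now - self.last) * self.qps)
            self.last = now
            if self.tokens >= 1.0:
                self.tokens -= 1.0
                return True
            return False  # caller reports "pull QPS exceeded"

    limiter = PullLimiter()
    results = [limiter.try_acquire() for _ in range(20)]
    print(results.count(False), "of 20 simultaneous pulls rejected")
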
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.045050 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7kzkp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-6c866cfdcb-qfc8h_openstack-operators(184dc664-1b08-43fe-bc79-6b6bdcc2563f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.046840 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" podUID="184dc664-1b08-43fe-bc79-6b6bdcc2563f" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.100532 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx"] Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.113362 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:1b684c4ca525a279deee45980140d895e264526c5c7e0a6981d6fae6cbcaa420,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zdpvb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-9b6f8f78c-8bdgx_openstack-operators(8ef4d6f9-7a4c-4f30-984c-0a96f344a539): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.114718 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" podUID="8ef4d6f9-7a4c-4f30-984c-0a96f344a539" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.114779 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x"] Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.126150 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc"] Jan 06 08:31:32 crc kubenswrapper[4784]: W0106 08:31:32.130803 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd92de86_98c0_4c11_9895_7b78b2aef05b.slice/crio-4fd17c3d75e470012ae22e24cabcc4980279c9495b270efa0c02266acb2107a5 WatchSource:0}: Error finding container 
4fd17c3d75e470012ae22e24cabcc4980279c9495b270efa0c02266acb2107a5: Status 404 returned error can't find the container with id 4fd17c3d75e470012ae22e24cabcc4980279c9495b270efa0c02266acb2107a5 Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.133636 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mvdmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-9dbdf6486-wk4xc_openstack-operators(fd92de86-98c0-4c11-9895-7b78b2aef05b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.135217 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" podUID="fd92de86-98c0-4c11-9895-7b78b2aef05b" Jan 06 08:31:32 crc kubenswrapper[4784]: W0106 08:31:32.137244 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76b61982_bafd_4f7a_a473_f5c0ef78ee74.slice/crio-eff039933a38aecc1ac2824aa1bf1048dcc334a33f0c6faa3f67538c7d21281e WatchSource:0}: Error finding container eff039933a38aecc1ac2824aa1bf1048dcc334a33f0c6faa3f67538c7d21281e: Status 404 returned error can't find the container with id 
eff039933a38aecc1ac2824aa1bf1048dcc334a33f0c6faa3f67538c7d21281e Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.154004 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d645h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-cqj6x_openstack-operators(76b61982-bafd-4f7a-a473-f5c0ef78ee74): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.155644 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" podUID="76b61982-bafd-4f7a-a473-f5c0ef78ee74" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.256885 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" event={"ID":"9b052e6e-0823-402f-8e8b-bbd48d0ea36d","Type":"ContainerStarted","Data":"6a970e94ed643d21237ccff3b2d8128eeee1e8f7d34f3ea49185b38f35a3dcec"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.278780 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" event={"ID":"adad8ba1-23e8-40d7-95b8-60cc2803765c","Type":"ContainerStarted","Data":"088c22e76862831c169d01ffba13ab7a90f70ef72a69e1b9d2c3dfd3d4dc2b1e"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.280659 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" 
event={"ID":"fba34147-9dd8-4e21-a886-9d1de06ef7ad","Type":"ContainerStarted","Data":"0487a5ad0f4f67552954d7ede7358c1ac38a8585d5eaa0e3bf93460004a94065"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.284847 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" event={"ID":"6da1078f-40ff-46ae-af83-b4befd08da78","Type":"ContainerStarted","Data":"67e1efd006f254cc01841deef487bd6396b3818cc10635c63d4a1a1df0cb5bfe"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.286738 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" event={"ID":"8ef4d6f9-7a4c-4f30-984c-0a96f344a539","Type":"ContainerStarted","Data":"fe42082ed91c47984cff564f84d326405a3f00638c1e1e24ddf3205a4bd68f87"} Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.292224 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:1b684c4ca525a279deee45980140d895e264526c5c7e0a6981d6fae6cbcaa420\\\"\"" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" podUID="8ef4d6f9-7a4c-4f30-984c-0a96f344a539" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.295415 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" event={"ID":"4a449bab-eabb-457b-94a3-c2a5bfd9827c","Type":"ContainerStarted","Data":"41cf215e09b113fba739af342fdd1380c9ff77936b41d047f0eab9fb8fa35120"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.297138 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" event={"ID":"74544171-df6c-4a36-8955-3b11da058598","Type":"ContainerStarted","Data":"09382bf1f41ec5727b1c8208efde18e5260671858ad42ccf248b04fd3da1171c"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.305472 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" event={"ID":"76b61982-bafd-4f7a-a473-f5c0ef78ee74","Type":"ContainerStarted","Data":"eff039933a38aecc1ac2824aa1bf1048dcc334a33f0c6faa3f67538c7d21281e"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.310902 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" event={"ID":"184dc664-1b08-43fe-bc79-6b6bdcc2563f","Type":"ContainerStarted","Data":"9fa9779c479045c2e84b7e3988259c5ca4b8992790c58a8c10ddf67bbe0d0036"} Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.311606 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" podUID="76b61982-bafd-4f7a-a473-f5c0ef78ee74" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.313454 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6\\\"\"" 
pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" podUID="184dc664-1b08-43fe-bc79-6b6bdcc2563f" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.317991 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" podUID="18c82783-5901-4ed0-ba58-23f5951cc7d1" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.331409 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" event={"ID":"18c82783-5901-4ed0-ba58-23f5951cc7d1","Type":"ContainerStarted","Data":"a22235917ca9265db5d41529eab8aeab730d7bae448d885e695587c7d9235130"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.331484 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" event={"ID":"bd426be0-7b17-4514-b36a-8e25c067a2e9","Type":"ContainerStarted","Data":"7dba940a70e6a6233951ee299c11f4c5af454cd0971abd474aa79ce9452d5c09"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.331500 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" event={"ID":"fc26f7fb-7146-41b2-b288-96c827f08dd4","Type":"ContainerStarted","Data":"30a960097a2e2edfe3e790c268d3d19f77f497c4abde7bbc0731576cd6c58a31"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.337798 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" event={"ID":"d74a08a3-5b46-47c3-8c74-d29e532e4df3","Type":"ContainerStarted","Data":"f9df0c9d6d19a35397cbbe0a8e92665765891ae2c116be71bfea7b89881ce104"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.345714 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" event={"ID":"943574ea-c3fa-4541-8c22-d0b799c7497a","Type":"ContainerStarted","Data":"c707cceff6c33075e1d34bac564543ca5544e9d67ea37bcab0c08996ff61cc50"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.349502 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" event={"ID":"1c68028b-8461-491c-bf85-78776c51b77d","Type":"ContainerStarted","Data":"42fcb31224a17d8ea615bf24ea2a59fc04c7b1dc6c6948aaabe60e0a4984a87f"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.355157 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" event={"ID":"754f68df-1972-403c-927d-898d74e9191a","Type":"ContainerStarted","Data":"8394a873decf7863bedd95ccfd448a17466c9e5e639d9a284684cfcdaf40cca8"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.362073 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" event={"ID":"fd92de86-98c0-4c11-9895-7b78b2aef05b","Type":"ContainerStarted","Data":"4fd17c3d75e470012ae22e24cabcc4980279c9495b270efa0c02266acb2107a5"} Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.364476 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" podUID="fd92de86-98c0-4c11-9895-7b78b2aef05b" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.370467 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" event={"ID":"4e27981f-a624-4063-b5af-a6ee3fd1c535","Type":"ContainerStarted","Data":"adbca74efcec64c7d2430d3dec7cb37c8910569dfb3fd53eb8637c8212970c13"} Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.652094 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:32 crc kubenswrapper[4784]: I0106 08:31:32.652257 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.652513 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.652615 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:34.652597784 +0000 UTC m=+996.698770621 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.654208 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:32 crc kubenswrapper[4784]: E0106 08:31:32.654375 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:34.654348315 +0000 UTC m=+996.700521152 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.393145 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" podUID="18c82783-5901-4ed0-ba58-23f5951cc7d1" Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.394071 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" podUID="fd92de86-98c0-4c11-9895-7b78b2aef05b" Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.394164 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" podUID="76b61982-bafd-4f7a-a473-f5c0ef78ee74" Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.394987 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6\\\"\"" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" podUID="184dc664-1b08-43fe-bc79-6b6bdcc2563f" Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.395819 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:1b684c4ca525a279deee45980140d895e264526c5c7e0a6981d6fae6cbcaa420\\\"\"" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" podUID="8ef4d6f9-7a4c-4f30-984c-0a96f344a539" Jan 06 08:31:33 crc kubenswrapper[4784]: I0106 08:31:33.776409 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.776626 4784 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:33 crc kubenswrapper[4784]: E0106 08:31:33.776671 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert podName:edc01c87-757b-47c7-b3cb-bfbb7ec71797 nodeName:}" failed. 
No retries permitted until 2026-01-06 08:31:37.776657278 +0000 UTC m=+999.822830115 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert") pod "infra-operator-controller-manager-6d99759cf-tqhzj" (UID: "edc01c87-757b-47c7-b3cb-bfbb7ec71797") : secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: I0106 08:31:34.081238 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.081565 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.081674 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:31:38.081651598 +0000 UTC m=+1000.127824435 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: I0106 08:31:34.692053 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:34 crc kubenswrapper[4784]: I0106 08:31:34.692672 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.692271 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.692842 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.693389 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:38.692895962 +0000 UTC m=+1000.739068789 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:34 crc kubenswrapper[4784]: E0106 08:31:34.693410 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:38.693403477 +0000 UTC m=+1000.739576314 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:37 crc kubenswrapper[4784]: I0106 08:31:37.858805 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:37 crc kubenswrapper[4784]: E0106 08:31:37.859053 4784 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:37 crc kubenswrapper[4784]: E0106 08:31:37.859510 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert podName:edc01c87-757b-47c7-b3cb-bfbb7ec71797 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:45.859483499 +0000 UTC m=+1007.905656336 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert") pod "infra-operator-controller-manager-6d99759cf-tqhzj" (UID: "edc01c87-757b-47c7-b3cb-bfbb7ec71797") : secret "infra-operator-webhook-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: I0106 08:31:38.163218 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.163462 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.163601 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:31:46.163569673 +0000 UTC m=+1008.209742670 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: I0106 08:31:38.775094 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.775361 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: I0106 08:31:38.775402 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.775480 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:46.775454796 +0000 UTC m=+1008.821627633 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.775878 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:38 crc kubenswrapper[4784]: E0106 08:31:38.777705 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:31:46.77766594 +0000 UTC m=+1008.823838917 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.351288 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.351777 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.351817 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.352341 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.352384 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064" gracePeriod=600 Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.493867 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064" exitCode=0 Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.493927 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064"} Jan 06 08:31:44 crc kubenswrapper[4784]: I0106 08:31:44.493961 4784 scope.go:117] "RemoveContainer" containerID="79b856f10bb460704a3c69053ad2f54af7d0f23c85d18e02491af96b533c786d" Jan 06 08:31:45 crc kubenswrapper[4784]: I0106 08:31:45.915937 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: \"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:45 crc kubenswrapper[4784]: I0106 08:31:45.924665 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/edc01c87-757b-47c7-b3cb-bfbb7ec71797-cert\") pod \"infra-operator-controller-manager-6d99759cf-tqhzj\" (UID: 
\"edc01c87-757b-47c7-b3cb-bfbb7ec71797\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:45 crc kubenswrapper[4784]: I0106 08:31:45.978836 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:31:46 crc kubenswrapper[4784]: I0106 08:31:46.220158 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.220466 4784 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.220686 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert podName:5303160b-4666-4800-8f86-72b1a823073d nodeName:}" failed. No retries permitted until 2026-01-06 08:32:02.220645617 +0000 UTC m=+1024.266818504 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert") pod "openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" (UID: "5303160b-4666-4800-8f86-72b1a823073d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: I0106 08:31:46.831967 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.832235 4784 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.833190 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. No retries permitted until 2026-01-06 08:32:02.833153818 +0000 UTC m=+1024.879326815 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "metrics-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.833490 4784 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: E0106 08:31:46.833683 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs podName:05e4a6ac-9ee3-4726-8709-945b37705103 nodeName:}" failed. 
No retries permitted until 2026-01-06 08:32:02.833642382 +0000 UTC m=+1024.879815259 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs") pod "openstack-operator-controller-manager-77df58d67c-27bnt" (UID: "05e4a6ac-9ee3-4726-8709-945b37705103") : secret "webhook-server-cert" not found Jan 06 08:31:46 crc kubenswrapper[4784]: I0106 08:31:46.833814 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.353520 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c" Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.354873 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cx9qm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.353520 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.354873 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cx9qm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-568985c78-q4rwb_openstack-operators(4a449bab-eabb-457b-94a3-c2a5bfd9827c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.356135 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" podUID="4a449bab-eabb-457b-94a3-c2a5bfd9827c"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.575508 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" podUID="4a449bab-eabb-457b-94a3-c2a5bfd9827c"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.902722 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.902937 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tsdht,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-68d988df55-r6lj7_openstack-operators(4e27981f-a624-4063-b5af-a6ee3fd1c535): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 06 08:31:54 crc kubenswrapper[4784]: E0106 08:31:54.904147 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" podUID="4e27981f-a624-4063-b5af-a6ee3fd1c535"
Jan 06 08:31:55 crc kubenswrapper[4784]: E0106 08:31:55.539471 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670"
Jan 06 08:31:55 crc kubenswrapper[4784]: E0106 08:31:55.540099 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5cxsq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-5fbbf8b6cc-pz42s_openstack-operators(fba34147-9dd8-4e21-a886-9d1de06ef7ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 06 08:31:55 crc kubenswrapper[4784]: E0106 08:31:55.543530 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" podUID="fba34147-9dd8-4e21-a886-9d1de06ef7ad"
Jan 06 08:31:55 crc kubenswrapper[4784]: E0106 08:31:55.606789 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" podUID="fba34147-9dd8-4e21-a886-9d1de06ef7ad"
Jan 06 08:31:55 crc kubenswrapper[4784]: E0106 08:31:55.607359 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" podUID="4e27981f-a624-4063-b5af-a6ee3fd1c535"
Jan 06 08:31:56 crc kubenswrapper[4784]: I0106 08:31:56.032787 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj"]
Jan 06 08:31:56 crc kubenswrapper[4784]: I0106 08:31:56.294099 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 06 08:31:56 crc kubenswrapper[4784]: I0106 08:31:56.597683 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" event={"ID":"edc01c87-757b-47c7-b3cb-bfbb7ec71797","Type":"ContainerStarted","Data":"1d65438f593cdedac846652ee194af487e671721aaf733c1dcad8b310e0b9249"}
Jan 06 08:31:58 crc kubenswrapper[4784]: I0106 08:31:58.612510 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" event={"ID":"bd426be0-7b17-4514-b36a-8e25c067a2e9","Type":"ContainerStarted","Data":"a4aaaabe841f465985a53402ab367cb5ca98038312af0384b7f02fa8db5ba36d"}
pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:31:58 crc kubenswrapper[4784]: I0106 08:31:58.645763 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" podStartSLOduration=5.425055278 podStartE2EDuration="29.645740588s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.318402664 +0000 UTC m=+993.364575501" lastFinishedPulling="2026-01-06 08:31:55.539087974 +0000 UTC m=+1017.585260811" observedRunningTime="2026-01-06 08:31:58.631429935 +0000 UTC m=+1020.677602792" watchObservedRunningTime="2026-01-06 08:31:58.645740588 +0000 UTC m=+1020.691913435" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.634507 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" event={"ID":"9b052e6e-0823-402f-8e8b-bbd48d0ea36d","Type":"ContainerStarted","Data":"09d9631f0c745664d79a8b73e401bab1724ab9f856f48806787d4c3c6e75d120"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.635010 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.641780 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" event={"ID":"6da1078f-40ff-46ae-af83-b4befd08da78","Type":"ContainerStarted","Data":"69dcaa0584947910fd6604afd304acc232019992307aeb3051b74a07d7fd46b1"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.641845 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.645577 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" event={"ID":"07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7","Type":"ContainerStarted","Data":"65f596529ae6f529999a21eff96033f2bd4d9cf346c791657fa4dd67eb8cd1af"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.645790 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.651299 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" event={"ID":"fc26f7fb-7146-41b2-b288-96c827f08dd4","Type":"ContainerStarted","Data":"16972ac4729c9abca2146e960507d4ba9684723482f4dc09f41d783757e35dbf"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.651415 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.655076 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" event={"ID":"1a45b65a-e45a-4a0e-be79-81b1d9d60c35","Type":"ContainerStarted","Data":"6c2f0fe9979d83a5c5c6047d1dfcfb164b3c1878761659e56e3573a091758c74"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.655177 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:32:00 crc 
kubenswrapper[4784]: I0106 08:32:00.661734 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" event={"ID":"d74a08a3-5b46-47c3-8c74-d29e532e4df3","Type":"ContainerStarted","Data":"7321d5b349fe1639ec8a0031a354c8a4bce0dce8a3586ef0464972434d63e24c"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.661974 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.664447 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" event={"ID":"74544171-df6c-4a36-8955-3b11da058598","Type":"ContainerStarted","Data":"0463bed9d848bf83f7b9c5d2b41ed206b04b7775f469872edc7b8630ad6ec97d"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.664606 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.667563 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" podStartSLOduration=7.482344787 podStartE2EDuration="31.667526321s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.351533282 +0000 UTC m=+993.397706119" lastFinishedPulling="2026-01-06 08:31:55.536714816 +0000 UTC m=+1017.582887653" observedRunningTime="2026-01-06 08:32:00.661514297 +0000 UTC m=+1022.707687134" watchObservedRunningTime="2026-01-06 08:32:00.667526321 +0000 UTC m=+1022.713699158" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.671906 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" event={"ID":"754f68df-1972-403c-927d-898d74e9191a","Type":"ContainerStarted","Data":"d241d82dec939e3bce56f7d47195d79c9e8641866fd52654f1b100b93906036c"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.672083 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.682203 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" event={"ID":"adad8ba1-23e8-40d7-95b8-60cc2803765c","Type":"ContainerStarted","Data":"58f840a7a59e94c383b4ede09446b543bac30ed1371b43d95742bd301063b713"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.682292 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.691593 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" event={"ID":"943574ea-c3fa-4541-8c22-d0b799c7497a","Type":"ContainerStarted","Data":"39ea19c38bfe2ec4530d4120ba59865c1b227eb9d46f408f18d73beea5f2c69e"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.691820 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.698619 4784 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" podStartSLOduration=7.137173646 podStartE2EDuration="31.69859954s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:30.975325673 +0000 UTC m=+993.021498510" lastFinishedPulling="2026-01-06 08:31:55.536751567 +0000 UTC m=+1017.582924404" observedRunningTime="2026-01-06 08:32:00.694694317 +0000 UTC m=+1022.740867154" watchObservedRunningTime="2026-01-06 08:32:00.69859954 +0000 UTC m=+1022.744772377" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.702862 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" event={"ID":"1c68028b-8461-491c-bf85-78776c51b77d","Type":"ContainerStarted","Data":"f0dac368e95bb4691ccd3c11a2b37c97f2faf15af4e3578500404f4a2b49d13b"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.703635 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.711486 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd"} Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.759766 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" podStartSLOduration=7.180769186 podStartE2EDuration="31.759749988s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:30.956598571 +0000 UTC m=+993.002771408" lastFinishedPulling="2026-01-06 08:31:55.535579383 +0000 UTC m=+1017.581752210" observedRunningTime="2026-01-06 08:32:00.731425178 +0000 UTC m=+1022.777598015" watchObservedRunningTime="2026-01-06 08:32:00.759749988 +0000 UTC m=+1022.805922825" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.762394 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" podStartSLOduration=7.572906866 podStartE2EDuration="31.762385074s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.346554248 +0000 UTC m=+993.392727085" lastFinishedPulling="2026-01-06 08:31:55.536032456 +0000 UTC m=+1017.582205293" observedRunningTime="2026-01-06 08:32:00.760799338 +0000 UTC m=+1022.806972175" watchObservedRunningTime="2026-01-06 08:32:00.762385074 +0000 UTC m=+1022.808557901" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.781570 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" podStartSLOduration=7.503939702 podStartE2EDuration="31.781537939s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.284834103 +0000 UTC m=+993.331006940" lastFinishedPulling="2026-01-06 08:31:55.56243234 +0000 UTC m=+1017.608605177" observedRunningTime="2026-01-06 08:32:00.779276592 +0000 UTC m=+1022.825449429" watchObservedRunningTime="2026-01-06 08:32:00.781537939 +0000 UTC m=+1022.827710776" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.799971 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" podStartSLOduration=8.258508601 podStartE2EDuration="31.79994554s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.994908066 +0000 UTC m=+994.041080913" lastFinishedPulling="2026-01-06 08:31:55.536345015 +0000 UTC m=+1017.582517852" observedRunningTime="2026-01-06 08:32:00.796335656 +0000 UTC m=+1022.842508493" watchObservedRunningTime="2026-01-06 08:32:00.79994554 +0000 UTC m=+1022.846118377" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.823756 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" podStartSLOduration=7.878222114 podStartE2EDuration="31.823739178s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.586253929 +0000 UTC m=+993.632426766" lastFinishedPulling="2026-01-06 08:31:55.531770993 +0000 UTC m=+1017.577943830" observedRunningTime="2026-01-06 08:32:00.817175069 +0000 UTC m=+1022.863347906" watchObservedRunningTime="2026-01-06 08:32:00.823739178 +0000 UTC m=+1022.869912015" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.841917 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" podStartSLOduration=8.085979102 podStartE2EDuration="31.841900694s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.850989824 +0000 UTC m=+993.897162661" lastFinishedPulling="2026-01-06 08:31:55.606911416 +0000 UTC m=+1017.653084253" observedRunningTime="2026-01-06 08:32:00.840662007 +0000 UTC m=+1022.886834854" watchObservedRunningTime="2026-01-06 08:32:00.841900694 +0000 UTC m=+1022.888073531" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.860745 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" podStartSLOduration=7.928010444 podStartE2EDuration="31.860724278s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.603095776 +0000 UTC m=+993.649268613" lastFinishedPulling="2026-01-06 08:31:55.535809609 +0000 UTC m=+1017.581982447" observedRunningTime="2026-01-06 08:32:00.860031777 +0000 UTC m=+1022.906204614" watchObservedRunningTime="2026-01-06 08:32:00.860724278 +0000 UTC m=+1022.906897115" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.915557 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" podStartSLOduration=7.762514848 podStartE2EDuration="31.915527403s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.375461683 +0000 UTC m=+993.421634520" lastFinishedPulling="2026-01-06 08:31:55.528474238 +0000 UTC m=+1017.574647075" observedRunningTime="2026-01-06 08:32:00.910061855 +0000 UTC m=+1022.956234692" watchObservedRunningTime="2026-01-06 08:32:00.915527403 +0000 UTC m=+1022.961700240" Jan 06 08:32:00 crc kubenswrapper[4784]: I0106 08:32:00.932207 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" podStartSLOduration=7.36463431 podStartE2EDuration="30.932187345s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.969181652 +0000 UTC 
m=+994.015354489" lastFinishedPulling="2026-01-06 08:31:55.536734687 +0000 UTC m=+1017.582907524" observedRunningTime="2026-01-06 08:32:00.931367601 +0000 UTC m=+1022.977540438" watchObservedRunningTime="2026-01-06 08:32:00.932187345 +0000 UTC m=+1022.978360182" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.301375 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.309041 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5303160b-4666-4800-8f86-72b1a823073d-cert\") pod \"openstack-baremetal-operator-controller-manager-74b998cd6-96jtt\" (UID: \"5303160b-4666-4800-8f86-72b1a823073d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.458079 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rb84m" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.464715 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.762307 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" event={"ID":"76b61982-bafd-4f7a-a473-f5c0ef78ee74","Type":"ContainerStarted","Data":"cc7e470d3bdad07e052ae4e8fd453503724fcc67e11553d5c052d8d7c3f5112d"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.769813 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" event={"ID":"184dc664-1b08-43fe-bc79-6b6bdcc2563f","Type":"ContainerStarted","Data":"556bfc35a64dd12c63ddc051d733eb97eb761042a467326b3da5344daa01f523"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.770275 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.772772 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" event={"ID":"18c82783-5901-4ed0-ba58-23f5951cc7d1","Type":"ContainerStarted","Data":"c77e1b8694f3e3a510484eca45c92e0941e6e85893de07483e3f1d72c2ae64b5"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.773229 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.774327 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" event={"ID":"fd92de86-98c0-4c11-9895-7b78b2aef05b","Type":"ContainerStarted","Data":"492eadbf27399021947a55f38d9c2c13e47c54bc8e3bd1b15aff7eb5edf7ff29"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.774768 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.776032 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" event={"ID":"8ef4d6f9-7a4c-4f30-984c-0a96f344a539","Type":"ContainerStarted","Data":"95f199ee1a2327408d5b3b9102350e9f88bb6d1321eb378096ecb0d4801deff6"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.776394 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.778234 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" event={"ID":"edc01c87-757b-47c7-b3cb-bfbb7ec71797","Type":"ContainerStarted","Data":"458e2cd90435caafe08cfbb994f6f51d256d997386de8e9072223711fcf287d8"} Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.782737 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cqj6x" podStartSLOduration=5.159531856 podStartE2EDuration="32.782724666s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.153879852 +0000 UTC m=+994.200052679" lastFinishedPulling="2026-01-06 08:31:59.777072612 +0000 UTC m=+1021.823245489" observedRunningTime="2026-01-06 08:32:02.780592084 +0000 UTC m=+1024.826764921" watchObservedRunningTime="2026-01-06 08:32:02.782724666 +0000 UTC m=+1024.828897503" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.800711 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" podStartSLOduration=5.325708571 podStartE2EDuration="32.800689615s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.112771714 +0000 UTC m=+994.158944551" lastFinishedPulling="2026-01-06 08:31:59.587752748 +0000 UTC m=+1021.633925595" observedRunningTime="2026-01-06 08:32:02.797161774 +0000 UTC m=+1024.843334611" watchObservedRunningTime="2026-01-06 08:32:02.800689615 +0000 UTC m=+1024.846862452" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.815855 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" podStartSLOduration=28.457875109 podStartE2EDuration="33.815834213s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:56.293668654 +0000 UTC m=+1018.339841521" lastFinishedPulling="2026-01-06 08:32:01.651627788 +0000 UTC m=+1023.697800625" observedRunningTime="2026-01-06 08:32:02.813526357 +0000 UTC m=+1024.859699194" watchObservedRunningTime="2026-01-06 08:32:02.815834213 +0000 UTC m=+1024.862007050" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.841665 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" podStartSLOduration=5.134222774 podStartE2EDuration="32.84164194s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.133490023 +0000 UTC m=+994.179662860" lastFinishedPulling="2026-01-06 08:31:59.840909169 +0000 UTC m=+1021.887082026" observedRunningTime="2026-01-06 08:32:02.838470228 +0000 UTC m=+1024.884643065" 
watchObservedRunningTime="2026-01-06 08:32:02.84164194 +0000 UTC m=+1024.887814767" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.862493 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" podStartSLOduration=5.050451811 podStartE2EDuration="32.862465651s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.035701525 +0000 UTC m=+994.081874362" lastFinishedPulling="2026-01-06 08:31:59.847715365 +0000 UTC m=+1021.893888202" observedRunningTime="2026-01-06 08:32:02.857149468 +0000 UTC m=+1024.903322295" watchObservedRunningTime="2026-01-06 08:32:02.862465651 +0000 UTC m=+1024.908638488" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.877666 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" podStartSLOduration=5.194825607 podStartE2EDuration="32.877646401s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.041921745 +0000 UTC m=+994.088094582" lastFinishedPulling="2026-01-06 08:31:59.724742539 +0000 UTC m=+1021.770915376" observedRunningTime="2026-01-06 08:32:02.871662127 +0000 UTC m=+1024.917834964" watchObservedRunningTime="2026-01-06 08:32:02.877646401 +0000 UTC m=+1024.923819238" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.940235 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.940347 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.950317 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-webhook-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:02 crc kubenswrapper[4784]: I0106 08:32:02.973287 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05e4a6ac-9ee3-4726-8709-945b37705103-metrics-certs\") pod \"openstack-operator-controller-manager-77df58d67c-27bnt\" (UID: \"05e4a6ac-9ee3-4726-8709-945b37705103\") " pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.017699 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt"] Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.202694 4784 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-bgz56" Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.210967 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.676226 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt"] Jan 06 08:32:03 crc kubenswrapper[4784]: W0106 08:32:03.678084 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05e4a6ac_9ee3_4726_8709_945b37705103.slice/crio-2d071a8a7c3cabe976f86845e37162589f1c4214a02e0e403fb6563c2dd43a40 WatchSource:0}: Error finding container 2d071a8a7c3cabe976f86845e37162589f1c4214a02e0e403fb6563c2dd43a40: Status 404 returned error can't find the container with id 2d071a8a7c3cabe976f86845e37162589f1c4214a02e0e403fb6563c2dd43a40 Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.790016 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" event={"ID":"5303160b-4666-4800-8f86-72b1a823073d","Type":"ContainerStarted","Data":"fdc906d1d6b49abc1dc22645bcb1001ee178c4a1a2cec82575c12611c283c009"} Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.793012 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" event={"ID":"05e4a6ac-9ee3-4726-8709-945b37705103","Type":"ContainerStarted","Data":"2d071a8a7c3cabe976f86845e37162589f1c4214a02e0e403fb6563c2dd43a40"} Jan 06 08:32:03 crc kubenswrapper[4784]: I0106 08:32:03.794084 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:32:04 crc kubenswrapper[4784]: I0106 08:32:04.804855 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" event={"ID":"05e4a6ac-9ee3-4726-8709-945b37705103","Type":"ContainerStarted","Data":"fd7d5675ed502121d622ac4e651fb601a6ca7db09aa7a470c3f52929e425530a"} Jan 06 08:32:04 crc kubenswrapper[4784]: I0106 08:32:04.804999 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:05 crc kubenswrapper[4784]: I0106 08:32:05.814502 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" event={"ID":"5303160b-4666-4800-8f86-72b1a823073d","Type":"ContainerStarted","Data":"2d81fd6a6a8032a421e4bfcdff5c10cffa3d9edc8b7de1600a5dc7bcd6b36b08"} Jan 06 08:32:05 crc kubenswrapper[4784]: I0106 08:32:05.853303 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" podStartSLOduration=34.862775497 podStartE2EDuration="36.853286546s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:32:03.02565346 +0000 UTC m=+1025.071826297" lastFinishedPulling="2026-01-06 08:32:05.016164509 +0000 UTC m=+1027.062337346" observedRunningTime="2026-01-06 08:32:05.846491829 +0000 UTC m=+1027.892664666" watchObservedRunningTime="2026-01-06 08:32:05.853286546 +0000 UTC 
m=+1027.899459383" Jan 06 08:32:05 crc kubenswrapper[4784]: I0106 08:32:05.853696 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" podStartSLOduration=35.853691268 podStartE2EDuration="35.853691268s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:32:04.839178521 +0000 UTC m=+1026.885351368" watchObservedRunningTime="2026-01-06 08:32:05.853691268 +0000 UTC m=+1027.899864105" Jan 06 08:32:06 crc kubenswrapper[4784]: I0106 08:32:06.824169 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:32:07 crc kubenswrapper[4784]: I0106 08:32:07.839036 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" event={"ID":"fba34147-9dd8-4e21-a886-9d1de06ef7ad","Type":"ContainerStarted","Data":"bf17911cd64b81b152d1659bb1e2267b77614afca5536ccc8beb57a4aeaf7a63"} Jan 06 08:32:07 crc kubenswrapper[4784]: I0106 08:32:07.839858 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:32:07 crc kubenswrapper[4784]: I0106 08:32:07.865950 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" podStartSLOduration=3.681113679 podStartE2EDuration="38.865912094s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:32.023038709 +0000 UTC m=+994.069211546" lastFinishedPulling="2026-01-06 08:32:07.207837124 +0000 UTC m=+1029.254009961" observedRunningTime="2026-01-06 08:32:07.865005267 +0000 UTC m=+1029.911178184" watchObservedRunningTime="2026-01-06 08:32:07.865912094 +0000 UTC m=+1029.912084961" Jan 06 08:32:09 crc kubenswrapper[4784]: I0106 08:32:09.972818 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-zvg9p" Jan 06 08:32:09 crc kubenswrapper[4784]: I0106 08:32:09.982407 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-9c6jw" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.103618 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-k79sj" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.159515 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-7krlv" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.253182 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-nkztz" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.286532 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-lw8c8" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.440032 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-ntw8z" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.482281 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-s6sv6" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.515090 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-chgf2" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.531391 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-5t7n9" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.553441 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-9gkmh" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.742350 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-8lhqg" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.867763 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" event={"ID":"4e27981f-a624-4063-b5af-a6ee3fd1c535","Type":"ContainerStarted","Data":"b427ef30e3cf80e16a7d4764e956ebf5f7e567bd6cc5103962372088bec0e249"} Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.869124 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.891261 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" podStartSLOduration=3.021128451 podStartE2EDuration="40.891234725s" podCreationTimestamp="2026-01-06 08:31:30 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.934571181 +0000 UTC m=+993.980744008" lastFinishedPulling="2026-01-06 08:32:09.804677445 +0000 UTC m=+1031.850850282" observedRunningTime="2026-01-06 08:32:10.889137615 +0000 UTC m=+1032.935310462" watchObservedRunningTime="2026-01-06 08:32:10.891234725 +0000 UTC m=+1032.937407562" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.891485 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-rvf72" Jan 06 08:32:10 crc kubenswrapper[4784]: I0106 08:32:10.955069 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8bdgx" Jan 06 08:32:11 crc kubenswrapper[4784]: I0106 08:32:11.007174 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-qfc8h" Jan 06 08:32:11 crc kubenswrapper[4784]: I0106 08:32:11.073341 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-wk4xc" Jan 06 08:32:11 crc kubenswrapper[4784]: I0106 08:32:11.878781 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" 
event={"ID":"4a449bab-eabb-457b-94a3-c2a5bfd9827c","Type":"ContainerStarted","Data":"58abc89fb63df53b9d33cc4db0e880bd9bcd4a92bafcdb448d34eda00184e587"} Jan 06 08:32:11 crc kubenswrapper[4784]: I0106 08:32:11.879249 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:32:11 crc kubenswrapper[4784]: I0106 08:32:11.912965 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" podStartSLOduration=3.675944188 podStartE2EDuration="42.912887908s" podCreationTimestamp="2026-01-06 08:31:29 +0000 UTC" firstStartedPulling="2026-01-06 08:31:31.603412305 +0000 UTC m=+993.649585142" lastFinishedPulling="2026-01-06 08:32:10.840356025 +0000 UTC m=+1032.886528862" observedRunningTime="2026-01-06 08:32:11.903910829 +0000 UTC m=+1033.950083696" watchObservedRunningTime="2026-01-06 08:32:11.912887908 +0000 UTC m=+1033.959060785" Jan 06 08:32:12 crc kubenswrapper[4784]: I0106 08:32:12.472718 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-74b998cd6-96jtt" Jan 06 08:32:13 crc kubenswrapper[4784]: I0106 08:32:13.219946 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-77df58d67c-27bnt" Jan 06 08:32:15 crc kubenswrapper[4784]: I0106 08:32:15.986941 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-tqhzj" Jan 06 08:32:20 crc kubenswrapper[4784]: I0106 08:32:20.481117 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-568985c78-q4rwb" Jan 06 08:32:20 crc kubenswrapper[4784]: I0106 08:32:20.551372 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-pz42s" Jan 06 08:32:20 crc kubenswrapper[4784]: I0106 08:32:20.918669 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-r6lj7" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.411273 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"] Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.413059 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.423361 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-pnr4b" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.423830 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.424109 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.424334 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.431486 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"] Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.486644 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7z5b\" (UniqueName: \"kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.486997 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.495330 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"] Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.496594 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.501083 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.508030 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"] Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.588390 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7z5b\" (UniqueName: \"kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.588480 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.588514 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhg88\" (UniqueName: \"kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.588560 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.588619 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.589710 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.614882 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7z5b\" (UniqueName: \"kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b\") pod \"dnsmasq-dns-84bb9d8bd9-nvr55\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.690616 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 
08:32:38.690703 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhg88\" (UniqueName: \"kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.690748 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.691599 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.692076 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.709703 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhg88\" (UniqueName: \"kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88\") pod \"dnsmasq-dns-5f854695bc-mwjmb\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.743105 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.814619 4784 util.go:30] "No sandbox for pod can be found. 
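Each dnsmasq-dns pod above attaches and mounts the same three volumes: two ConfigMap-backed volumes ("config" and "dns-svc") and a kubelet-generated projected service-account token ("kube-api-access-..."); the UniqueName kubernetes.io/configmap/<pod-UID>-<volume-name> is how the volume manager keys each mount. A sketch of how such volumes are declared with k8s.io/api/core/v1 types (editorial illustration; the backing ConfigMap names "dns" and "dns-svc" are inferred from the reflector cache entries above, not confirmed by the mount records):

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    // dnsmasqVolumes mirrors the volume set the reconciler entries attach to each
    // dnsmasq-dns pod. The backing ConfigMap names are guesses from this log.
    func dnsmasqVolumes() []corev1.Volume {
    	configMapVolume := func(volName, cmName string) corev1.Volume {
    		return corev1.Volume{
    			Name: volName,
    			VolumeSource: corev1.VolumeSource{
    				ConfigMap: &corev1.ConfigMapVolumeSource{
    					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
    				},
    			},
    		}
    	}
    	return []corev1.Volume{
    		configMapVolume("config", "dns"),      // volume "config" in the mount records
    		configMapVolume("dns-svc", "dns-svc"), // volume "dns-svc" in the mount records
    		// The kube-api-access-* volume is a projected service-account token that
    		// kubelet adds automatically; the workload does not declare it.
    	}
    }

    func main() {
    	for _, v := range dnsmasqVolumes() {
    		fmt.Println(v.Name)
    	}
    }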
Jan 06 08:32:38 crc kubenswrapper[4784]: I0106 08:32:38.814619 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb"
Jan 06 08:32:39 crc kubenswrapper[4784]: I0106 08:32:39.225531 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"]
Jan 06 08:32:39 crc kubenswrapper[4784]: I0106 08:32:39.268968 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" event={"ID":"4c96cb75-2040-47a1-add1-4a7d9658a490","Type":"ContainerStarted","Data":"6fc3a5f17aa34e528c767d51e41a806989f5a2864bb90d9e4b12de94c76c3439"}
Jan 06 08:32:39 crc kubenswrapper[4784]: I0106 08:32:39.328391 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"]
Jan 06 08:32:39 crc kubenswrapper[4784]: W0106 08:32:39.331105 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1931d0e0_77c0_437a_b1b2_c31ce41d381c.slice/crio-913cee3c56e99125a9167031c9275e112e1176c65b459f97c637ada90bc33093 WatchSource:0}: Error finding container 913cee3c56e99125a9167031c9275e112e1176c65b459f97c637ada90bc33093: Status 404 returned error can't find the container with id 913cee3c56e99125a9167031c9275e112e1176c65b459f97c637ada90bc33093
Jan 06 08:32:40 crc kubenswrapper[4784]: I0106 08:32:40.281807 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" event={"ID":"1931d0e0-77c0-437a-b1b2-c31ce41d381c","Type":"ContainerStarted","Data":"913cee3c56e99125a9167031c9275e112e1176c65b459f97c637ada90bc33093"}
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.082158 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.110349 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.111923 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.123318 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.142900 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2vxw\" (UniqueName: \"kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.153649 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.153685 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.255377 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2vxw\" (UniqueName: \"kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.255818 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.255924 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.257128 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.259655 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.283940 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2vxw\" (UniqueName: \"kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw\") pod \"dnsmasq-dns-744ffd65bc-2cj96\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.444516 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.468400 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.493722 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.496586 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.519260 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"]
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.562608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.562727 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v9x2\" (UniqueName: \"kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.562848 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.664501 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.664625 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.664687 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v9x2\" (UniqueName: \"kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.667179 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.669578 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.696745 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v9x2\" (UniqueName: \"kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2\") pod \"dnsmasq-dns-95f5f6995-mn8nz\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:41 crc kubenswrapper[4784]: I0106 08:32:41.855377 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.041884 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"]
Jan 06 08:32:42 crc kubenswrapper[4784]: W0106 08:32:42.066131 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod72188083_213e_47d8_a5e9_acbf76f3bfd7.slice/crio-bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e WatchSource:0}: Error finding container bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e: Status 404 returned error can't find the container with id bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e
Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.300614 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.302349 4784 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.304887 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.305451 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.306498 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.309145 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.309377 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.309409 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.309657 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-frg2w" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.309838 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.350027 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" event={"ID":"72188083-213e-47d8-a5e9-acbf76f3bfd7","Type":"ContainerStarted","Data":"bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e"} Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376457 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376685 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376784 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376805 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: 
\"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376843 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376893 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376919 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smlpk\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376938 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376956 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.376972 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.406977 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"] Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481341 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481459 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481509 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info\") pod 
\"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481630 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481658 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481738 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481787 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481881 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smlpk\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481911 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481970 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.481995 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.483223 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.483667 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.483997 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.484408 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.485453 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.488271 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.491754 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.492236 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.500723 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smlpk\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.502593 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.507611 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.514144 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") " pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.640647 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.657451 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.661040 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.663356 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.667461 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.667845 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.668006 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.668125 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.668398 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.669026 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.674686 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-66w6f" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786660 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786738 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786787 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786813 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786870 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786907 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786934 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.786966 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.787042 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.787071 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cn7b\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.787091 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.888915 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.888978 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cn7b\" (UniqueName: 
\"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889005 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889227 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889263 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889296 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889318 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889370 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889406 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889433 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.889454 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.891579 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.892036 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.892455 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.892823 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.893415 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.893493 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.897010 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.897839 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.900064 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.902780 4784 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.909388 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cn7b\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.914525 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:42 crc kubenswrapper[4784]: I0106 08:32:42.994890 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.360411 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerStarted","Data":"45a4e6b5dbe27d7dac2c075f1621242b64c0bb214bf82bcef116327204ccfe02"} Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.829989 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.832457 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.861742 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.862050 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.862166 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-cpxlw" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.862229 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.864826 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.866254 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908026 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908066 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: 
\"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908087 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj57t\" (UniqueName: \"kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908126 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908166 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908187 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908213 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:43 crc kubenswrapper[4784]: I0106 08:32:43.908245 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011009 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011135 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011174 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 
08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011218 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011265 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011331 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011357 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.011386 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj57t\" (UniqueName: \"kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.012064 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.012414 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.013342 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.014138 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.014992 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.020442 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.033676 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj57t\" (UniqueName: \"kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.038068 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.040970 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " pod="openstack/openstack-galera-0" Jan 06 08:32:44 crc kubenswrapper[4784]: I0106 08:32:44.171141 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.217604 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.221226 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.229605 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-hqzpp" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.229899 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.231915 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.233272 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.239317 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.336992 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337049 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337113 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337131 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337163 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337293 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337317 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.337371 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mf8zg\" (UniqueName: \"kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.406513 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.407475 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.413562 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.413669 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.415459 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-7srhk" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.445495 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448678 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448750 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448813 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448838 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448886 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.447322 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.448927 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.445817 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450483 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450745 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgc98\" (UniqueName: \"kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450790 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450826 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450854 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mf8zg\" (UniqueName: \"kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450922 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.450942 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.451305 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.451624 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.452176 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.480058 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.482178 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mf8zg\" (UniqueName: \"kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.490506 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.495302 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") " pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.552944 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.553012 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc 
kubenswrapper[4784]: I0106 08:32:45.553070 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.553152 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.553199 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgc98\" (UniqueName: \"kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.556761 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.558167 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.558183 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.562077 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.572142 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgc98\" (UniqueName: \"kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.576314 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " pod="openstack/memcached-0" Jan 06 08:32:45 crc kubenswrapper[4784]: I0106 08:32:45.730841 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.297483 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.298822 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.308690 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.310955 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-g5bw7" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.392389 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlcs5\" (UniqueName: \"kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5\") pod \"kube-state-metrics-0\" (UID: \"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f\") " pod="openstack/kube-state-metrics-0" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.493535 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlcs5\" (UniqueName: \"kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5\") pod \"kube-state-metrics-0\" (UID: \"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f\") " pod="openstack/kube-state-metrics-0" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.518408 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlcs5\" (UniqueName: \"kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5\") pod \"kube-state-metrics-0\" (UID: \"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f\") " pod="openstack/kube-state-metrics-0" Jan 06 08:32:47 crc kubenswrapper[4784]: I0106 08:32:47.615828 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.863018 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8tvjg"] Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.864940 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.868244 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.868422 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-c72lt" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.868682 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.879044 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"] Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.881432 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.887260 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg"] Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.909726 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"] Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965424 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965514 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965596 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965649 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965754 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965805 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965900 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qbxq\" (UniqueName: \"kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.965962 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 
08:32:50.966065 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.972749 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.972946 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.973003 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:50 crc kubenswrapper[4784]: I0106 08:32:50.973024 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpm56\" (UniqueName: \"kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075089 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075197 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075231 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075250 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpm56\" (UniqueName: \"kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075291 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075319 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075347 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075378 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075424 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075854 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.075969 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076100 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076116 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076182 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076258 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qbxq\" (UniqueName: \"kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076119 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076286 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076754 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076777 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.076959 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.078219 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.078395 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.087862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.095612 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle\") pod \"ovn-controller-8tvjg\" (UID: 
\"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.097970 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpm56\" (UniqueName: \"kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56\") pod \"ovn-controller-ovs-2n9kz\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.102704 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qbxq\" (UniqueName: \"kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq\") pod \"ovn-controller-8tvjg\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.192229 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg" Jan 06 08:32:51 crc kubenswrapper[4784]: I0106 08:32:51.199592 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.190020 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.191190 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.193606 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.194203 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.195689 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.195755 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.196044 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-v84xs" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.212359 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300634 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300682 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300745 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300789 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300813 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hx2j\" (UniqueName: \"kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300842 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300874 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.300894 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.402756 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.402806 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.402907 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.402981 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 
crc kubenswrapper[4784]: I0106 08:32:52.403055 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hx2j\" (UniqueName: \"kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.403091 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.403126 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.403149 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.403635 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.403699 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.404393 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.404656 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.407420 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.408169 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" 
(UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.421519 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.425849 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hx2j\" (UniqueName: \"kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.454328 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:52 crc kubenswrapper[4784]: I0106 08:32:52.516182 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.036751 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.748644 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.752755 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.757160 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-8gjmx" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.757511 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.757731 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.757989 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.763695 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.834898 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.834950 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.835198 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-6n9sb\" (UniqueName: \"kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.835683 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.835780 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.835935 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.835977 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.836023 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.940338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n9sb\" (UniqueName: \"kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.940488 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.940526 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.940609 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.940933 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.942082 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.942622 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.942703 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.943338 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.943386 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.943791 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.944457 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.954171 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.954972 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.957456 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.958098 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n9sb\" (UniqueName: \"kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:53 crc kubenswrapper[4784]: I0106 08:32:53.980728 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:54 crc kubenswrapper[4784]: I0106 08:32:54.082146 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 08:32:59 crc kubenswrapper[4784]: W0106 08:32:59.021118 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod052ecaa6_58fd_42ed_b2c5_6b8919470619.slice/crio-9dd22c7b81cfee4d0292f41912be1d5cc14198cc7a7057f44c9ea482f9c856cd WatchSource:0}: Error finding container 9dd22c7b81cfee4d0292f41912be1d5cc14198cc7a7057f44c9ea482f9c856cd: Status 404 returned error can't find the container with id 9dd22c7b81cfee4d0292f41912be1d5cc14198cc7a7057f44c9ea482f9c856cd Jan 06 08:32:59 crc kubenswrapper[4784]: I0106 08:32:59.504064 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 08:32:59 crc kubenswrapper[4784]: I0106 08:32:59.507947 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerStarted","Data":"9dd22c7b81cfee4d0292f41912be1d5cc14198cc7a7057f44c9ea482f9c856cd"} Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.072831 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.073014 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j7z5b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-nvr55_openstack(4c96cb75-2040-47a1-add1-4a7d9658a490): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.074213 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" podUID="4c96cb75-2040-47a1-add1-4a7d9658a490" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.077119 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.077253 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhg88,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-mwjmb_openstack(1931d0e0-77c0-437a-b1b2-c31ce41d381c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.078626 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" podUID="1931d0e0-77c0-437a-b1b2-c31ce41d381c" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.089392 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.089638 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q2vxw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-2cj96_openstack(72188083-213e-47d8-a5e9-acbf76f3bfd7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.090967 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" podUID="72188083-213e-47d8-a5e9-acbf76f3bfd7" Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.530890 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerStarted","Data":"c751a3aefb774c2a1e0749606c08e5fa9157a8c35e18468a959564cb251f7415"} Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.537775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerStarted","Data":"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753"} Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.706809 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg"] Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.729341 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.743337 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.822609 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.845707 4784 
log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 06 08:33:00 crc kubenswrapper[4784]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/72188083-213e-47d8-a5e9-acbf76f3bfd7/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 06 08:33:00 crc kubenswrapper[4784]: > podSandboxID="bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.845934 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:33:00 crc kubenswrapper[4784]: init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q2vxw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-2cj96_openstack(72188083-213e-47d8-a5e9-acbf76f3bfd7): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/72188083-213e-47d8-a5e9-acbf76f3bfd7/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 06 08:33:00 crc kubenswrapper[4784]: > logger="UnhandledError" Jan 06 08:33:00 crc kubenswrapper[4784]: E0106 08:33:00.847463 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/72188083-213e-47d8-a5e9-acbf76f3bfd7/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" 
pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" podUID="72188083-213e-47d8-a5e9-acbf76f3bfd7" Jan 06 08:33:00 crc kubenswrapper[4784]: W0106 08:33:00.869435 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod700a4853_cbd0_4cc6_8322_d9296caadf34.slice/crio-63af77200422878343a76de44c720e0eefbed1145e23a334095415ffe6523383 WatchSource:0}: Error finding container 63af77200422878343a76de44c720e0eefbed1145e23a334095415ffe6523383: Status 404 returned error can't find the container with id 63af77200422878343a76de44c720e0eefbed1145e23a334095415ffe6523383 Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.944752 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:33:00 crc kubenswrapper[4784]: I0106 08:33:00.960107 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.049709 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config\") pod \"4c96cb75-2040-47a1-add1-4a7d9658a490\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.049779 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc\") pod \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.049820 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhg88\" (UniqueName: \"kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88\") pod \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.049884 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7z5b\" (UniqueName: \"kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b\") pod \"4c96cb75-2040-47a1-add1-4a7d9658a490\" (UID: \"4c96cb75-2040-47a1-add1-4a7d9658a490\") " Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.049974 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config\") pod \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\" (UID: \"1931d0e0-77c0-437a-b1b2-c31ce41d381c\") " Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.050953 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1931d0e0-77c0-437a-b1b2-c31ce41d381c" (UID: "1931d0e0-77c0-437a-b1b2-c31ce41d381c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.051153 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config" (OuterVolumeSpecName: "config") pod "4c96cb75-2040-47a1-add1-4a7d9658a490" (UID: "4c96cb75-2040-47a1-add1-4a7d9658a490"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.051203 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config" (OuterVolumeSpecName: "config") pod "1931d0e0-77c0-437a-b1b2-c31ce41d381c" (UID: "1931d0e0-77c0-437a-b1b2-c31ce41d381c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.057070 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88" (OuterVolumeSpecName: "kube-api-access-nhg88") pod "1931d0e0-77c0-437a-b1b2-c31ce41d381c" (UID: "1931d0e0-77c0-437a-b1b2-c31ce41d381c"). InnerVolumeSpecName "kube-api-access-nhg88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.057290 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b" (OuterVolumeSpecName: "kube-api-access-j7z5b") pod "4c96cb75-2040-47a1-add1-4a7d9658a490" (UID: "4c96cb75-2040-47a1-add1-4a7d9658a490"). InnerVolumeSpecName "kube-api-access-j7z5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.134501 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.152458 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhg88\" (UniqueName: \"kubernetes.io/projected/1931d0e0-77c0-437a-b1b2-c31ce41d381c-kube-api-access-nhg88\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.152495 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7z5b\" (UniqueName: \"kubernetes.io/projected/4c96cb75-2040-47a1-add1-4a7d9658a490-kube-api-access-j7z5b\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.152510 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.152523 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c96cb75-2040-47a1-add1-4a7d9658a490-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.152534 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1931d0e0-77c0-437a-b1b2-c31ce41d381c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.155439 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 08:33:01 crc kubenswrapper[4784]: W0106 08:33:01.171822 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6a277ac_73de_4e2b_b39f_73d467b2222c.slice/crio-355be865b4877b40ac6bde96b2a5dd704306f244716ef6edd16465fad9a403dd WatchSource:0}: Error finding container 355be865b4877b40ac6bde96b2a5dd704306f244716ef6edd16465fad9a403dd: Status 404 returned error can't find the container with id 
355be865b4877b40ac6bde96b2a5dd704306f244716ef6edd16465fad9a403dd Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.244881 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.580076 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" event={"ID":"1931d0e0-77c0-437a-b1b2-c31ce41d381c","Type":"ContainerDied","Data":"913cee3c56e99125a9167031c9275e112e1176c65b459f97c637ada90bc33093"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.580660 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-mwjmb" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.605608 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3627acbf-1c12-4e8e-97f0-e44a6cd124c3","Type":"ContainerStarted","Data":"d297b8b13f5f10e37279f9ad278dd1765f896054c3c6c4059cfdcd26280764b0"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.617875 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg" event={"ID":"0ef35db6-a440-4394-a26f-750a29488828","Type":"ContainerStarted","Data":"1b54fdb7264ff641fcf407364676075ad525b00b1f338e708bcc2088e2b91304"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.629759 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" event={"ID":"4c96cb75-2040-47a1-add1-4a7d9658a490","Type":"ContainerDied","Data":"6fc3a5f17aa34e528c767d51e41a806989f5a2864bb90d9e4b12de94c76c3439"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.629983 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nvr55" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.687945 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerStarted","Data":"1b8c82e0d4727bf379a7fad1966e194ec6268615fe0db644593629019274a94f"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.693888 4784 generic.go:334] "Generic (PLEG): container finished" podID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerID="671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753" exitCode=0 Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.693973 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerDied","Data":"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.694008 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerStarted","Data":"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.695087 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.697874 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.700117 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerStarted","Data":"47f899ad74f75b80ff11af11be7d255985dd06ec857a6ea9ad0b8b936409fcef"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.701781 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f","Type":"ContainerStarted","Data":"21942dbe0e457cd19ecf3fa0e86ded5141c639e448dd08176cf8353f00376d0c"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.705566 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-mwjmb"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.708218 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerStarted","Data":"355be865b4877b40ac6bde96b2a5dd704306f244716ef6edd16465fad9a403dd"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.710215 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerStarted","Data":"63af77200422878343a76de44c720e0eefbed1145e23a334095415ffe6523383"} Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.735285 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" podStartSLOduration=2.980410173 podStartE2EDuration="20.735257374s" podCreationTimestamp="2026-01-06 08:32:41 +0000 UTC" firstStartedPulling="2026-01-06 08:32:42.416104044 +0000 UTC m=+1064.462276881" lastFinishedPulling="2026-01-06 08:33:00.170951245 +0000 UTC m=+1082.217124082" observedRunningTime="2026-01-06 08:33:01.731476266 +0000 UTC m=+1083.777649113" watchObservedRunningTime="2026-01-06 08:33:01.735257374 +0000 UTC m=+1083.781430211" Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.777169 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.791584 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"] Jan 06 08:33:01 crc kubenswrapper[4784]: I0106 08:33:01.795396 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nvr55"] Jan 06 08:33:01 crc kubenswrapper[4784]: W0106 08:33:01.805248 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda17dffd_4ff8_4df2_8701_2e910a4c5131.slice/crio-eb661efc6a61209fd0abdd310f5b4a6894bbc30a550cee838d40a6cc025f9b13 WatchSource:0}: Error finding container eb661efc6a61209fd0abdd310f5b4a6894bbc30a550cee838d40a6cc025f9b13: Status 404 returned error can't find the container with id eb661efc6a61209fd0abdd310f5b4a6894bbc30a550cee838d40a6cc025f9b13 Jan 06 08:33:02 crc kubenswrapper[4784]: I0106 08:33:02.336625 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1931d0e0-77c0-437a-b1b2-c31ce41d381c" path="/var/lib/kubelet/pods/1931d0e0-77c0-437a-b1b2-c31ce41d381c/volumes" Jan 06 08:33:02 crc kubenswrapper[4784]: I0106 08:33:02.337304 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c96cb75-2040-47a1-add1-4a7d9658a490" path="/var/lib/kubelet/pods/4c96cb75-2040-47a1-add1-4a7d9658a490/volumes" Jan 06 08:33:02 crc kubenswrapper[4784]: I0106 08:33:02.727363 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" 
event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerStarted","Data":"eb661efc6a61209fd0abdd310f5b4a6894bbc30a550cee838d40a6cc025f9b13"} Jan 06 08:33:06 crc kubenswrapper[4784]: I0106 08:33:06.856865 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" Jan 06 08:33:06 crc kubenswrapper[4784]: I0106 08:33:06.937920 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"] Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.291110 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.434748 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2vxw\" (UniqueName: \"kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw\") pod \"72188083-213e-47d8-a5e9-acbf76f3bfd7\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.434807 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc\") pod \"72188083-213e-47d8-a5e9-acbf76f3bfd7\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.434971 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config\") pod \"72188083-213e-47d8-a5e9-acbf76f3bfd7\" (UID: \"72188083-213e-47d8-a5e9-acbf76f3bfd7\") " Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.447253 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw" (OuterVolumeSpecName: "kube-api-access-q2vxw") pod "72188083-213e-47d8-a5e9-acbf76f3bfd7" (UID: "72188083-213e-47d8-a5e9-acbf76f3bfd7"). InnerVolumeSpecName "kube-api-access-q2vxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.462862 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "72188083-213e-47d8-a5e9-acbf76f3bfd7" (UID: "72188083-213e-47d8-a5e9-acbf76f3bfd7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.480918 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config" (OuterVolumeSpecName: "config") pod "72188083-213e-47d8-a5e9-acbf76f3bfd7" (UID: "72188083-213e-47d8-a5e9-acbf76f3bfd7"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.537197 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.537248 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2vxw\" (UniqueName: \"kubernetes.io/projected/72188083-213e-47d8-a5e9-acbf76f3bfd7-kube-api-access-q2vxw\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.537263 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72188083-213e-47d8-a5e9-acbf76f3bfd7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.789755 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" event={"ID":"72188083-213e-47d8-a5e9-acbf76f3bfd7","Type":"ContainerDied","Data":"bf75367d114631b61dde4ff861bed5745b365dc569b9e64c60bd0b350bc2860e"} Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.789860 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-2cj96" Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.865099 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"] Jan 06 08:33:08 crc kubenswrapper[4784]: I0106 08:33:08.868020 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-2cj96"] Jan 06 08:33:10 crc kubenswrapper[4784]: I0106 08:33:10.327269 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72188083-213e-47d8-a5e9-acbf76f3bfd7" path="/var/lib/kubelet/pods/72188083-213e-47d8-a5e9-acbf76f3bfd7/volumes" Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.821639 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerStarted","Data":"e1e26be922c57e54539f2aa8adb35b7ddeaf1093c84fe1a99379176abf764f01"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.825843 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerStarted","Data":"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.828441 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3627acbf-1c12-4e8e-97f0-e44a6cd124c3","Type":"ContainerStarted","Data":"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.828621 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.831104 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f","Type":"ContainerStarted","Data":"45d05df335bdaf2ee6d1081eceeb0fc66d2d5184d9c6e77356e4a0e16e487c5d"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.831293 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.833208 4784 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerStarted","Data":"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.835520 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerStarted","Data":"5906b3d46bac70f13bc27fb60444d1ee0a413a195f0184464f3dcff5699e7583"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.838011 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerStarted","Data":"2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa"} Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.888165 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=15.251565868 podStartE2EDuration="24.888124134s" podCreationTimestamp="2026-01-06 08:32:47 +0000 UTC" firstStartedPulling="2026-01-06 08:33:00.755061678 +0000 UTC m=+1082.801234515" lastFinishedPulling="2026-01-06 08:33:10.391619944 +0000 UTC m=+1092.437792781" observedRunningTime="2026-01-06 08:33:11.876924963 +0000 UTC m=+1093.923097810" watchObservedRunningTime="2026-01-06 08:33:11.888124134 +0000 UTC m=+1093.934296981" Jan 06 08:33:11 crc kubenswrapper[4784]: I0106 08:33:11.993912 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=18.369444090000002 podStartE2EDuration="26.993881431s" podCreationTimestamp="2026-01-06 08:32:45 +0000 UTC" firstStartedPulling="2026-01-06 08:33:00.744364463 +0000 UTC m=+1082.790537300" lastFinishedPulling="2026-01-06 08:33:09.368801804 +0000 UTC m=+1091.414974641" observedRunningTime="2026-01-06 08:33:11.960242819 +0000 UTC m=+1094.006415676" watchObservedRunningTime="2026-01-06 08:33:11.993881431 +0000 UTC m=+1094.040054268" Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.847995 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg" event={"ID":"0ef35db6-a440-4394-a26f-750a29488828","Type":"ContainerStarted","Data":"18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9"} Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.850316 4784 generic.go:334] "Generic (PLEG): container finished" podID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerID="26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c" exitCode=0 Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.850386 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerDied","Data":"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c"} Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.853361 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerStarted","Data":"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"} Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.857339 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerStarted","Data":"bd03e04330f8e1c997eb9c5a6519ec44ddc8665c10bea822cf494fdb01acc628"} Jan 06 08:33:12 crc kubenswrapper[4784]: I0106 08:33:12.873455 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8tvjg" podStartSLOduration=13.774259205 podStartE2EDuration="22.87343315s" podCreationTimestamp="2026-01-06 08:32:50 +0000 UTC" firstStartedPulling="2026-01-06 08:33:00.730714497 +0000 UTC m=+1082.776887334" lastFinishedPulling="2026-01-06 08:33:09.829888442 +0000 UTC m=+1091.876061279" observedRunningTime="2026-01-06 08:33:12.870094107 +0000 UTC m=+1094.916266954" watchObservedRunningTime="2026-01-06 08:33:12.87343315 +0000 UTC m=+1094.919605987" Jan 06 08:33:13 crc kubenswrapper[4784]: I0106 08:33:13.868498 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerStarted","Data":"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83"} Jan 06 08:33:13 crc kubenswrapper[4784]: I0106 08:33:13.869338 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-8tvjg" Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.882769 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerStarted","Data":"a7e71ce3ce6e75c5e79d87518db38bd414fa98abc85f8580bc7cbfd40aaa0044"} Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.884825 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerStarted","Data":"85ff42bd642efa429ae58eddee287d22c43869bed771a6ed4057e860b56b4123"} Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.889081 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerStarted","Data":"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab"} Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.889125 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.889152 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.905274 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=10.254105421 podStartE2EDuration="23.905251788s" podCreationTimestamp="2026-01-06 08:32:51 +0000 UTC" firstStartedPulling="2026-01-06 08:33:00.890039048 +0000 UTC m=+1082.936211885" lastFinishedPulling="2026-01-06 08:33:14.541185415 +0000 UTC m=+1096.587358252" observedRunningTime="2026-01-06 08:33:14.904585586 +0000 UTC m=+1096.950758423" watchObservedRunningTime="2026-01-06 08:33:14.905251788 +0000 UTC m=+1096.951424625" Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.938443 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=9.6708244 podStartE2EDuration="22.938414974s" podCreationTimestamp="2026-01-06 08:32:52 +0000 UTC" firstStartedPulling="2026-01-06 08:33:01.254046849 +0000 UTC m=+1083.300219686" lastFinishedPulling="2026-01-06 08:33:14.521637413 +0000 
UTC m=+1096.567810260" observedRunningTime="2026-01-06 08:33:14.934326256 +0000 UTC m=+1096.980499103" watchObservedRunningTime="2026-01-06 08:33:14.938414974 +0000 UTC m=+1096.984587811" Jan 06 08:33:14 crc kubenswrapper[4784]: I0106 08:33:14.971018 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-2n9kz" podStartSLOduration=17.036936476 podStartE2EDuration="24.970989112s" podCreationTimestamp="2026-01-06 08:32:50 +0000 UTC" firstStartedPulling="2026-01-06 08:33:01.809256959 +0000 UTC m=+1083.855429786" lastFinishedPulling="2026-01-06 08:33:09.743309585 +0000 UTC m=+1091.789482422" observedRunningTime="2026-01-06 08:33:14.957943285 +0000 UTC m=+1097.004116122" watchObservedRunningTime="2026-01-06 08:33:14.970989112 +0000 UTC m=+1097.017161949" Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.082853 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.135207 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.898791 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerID="5906b3d46bac70f13bc27fb60444d1ee0a413a195f0184464f3dcff5699e7583" exitCode=0 Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.898865 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerDied","Data":"5906b3d46bac70f13bc27fb60444d1ee0a413a195f0184464f3dcff5699e7583"} Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.903098 4784 generic.go:334] "Generic (PLEG): container finished" podID="32e811b4-672d-4aa2-905b-9406f594be5c" containerID="cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139" exitCode=0 Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.903367 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerDied","Data":"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139"} Jan 06 08:33:15 crc kubenswrapper[4784]: I0106 08:33:15.904492 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.516783 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.564050 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.916457 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerStarted","Data":"e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa"} Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.919862 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerStarted","Data":"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a"} Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.920666 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/ovsdbserver-nb-0" Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.997039 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=26.342444094 podStartE2EDuration="34.996960057s" podCreationTimestamp="2026-01-06 08:32:42 +0000 UTC" firstStartedPulling="2026-01-06 08:33:01.176893316 +0000 UTC m=+1083.223066153" lastFinishedPulling="2026-01-06 08:33:09.831409279 +0000 UTC m=+1091.877582116" observedRunningTime="2026-01-06 08:33:16.952338011 +0000 UTC m=+1098.998510888" watchObservedRunningTime="2026-01-06 08:33:16.996960057 +0000 UTC m=+1099.043132924" Jan 06 08:33:16 crc kubenswrapper[4784]: I0106 08:33:16.998818 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.001941 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.005496 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=23.666745467 podStartE2EDuration="33.005479502s" podCreationTimestamp="2026-01-06 08:32:44 +0000 UTC" firstStartedPulling="2026-01-06 08:33:00.046255436 +0000 UTC m=+1082.092428263" lastFinishedPulling="2026-01-06 08:33:09.384989471 +0000 UTC m=+1091.431162298" observedRunningTime="2026-01-06 08:33:16.984468726 +0000 UTC m=+1099.030641563" watchObservedRunningTime="2026-01-06 08:33:17.005479502 +0000 UTC m=+1099.051652379" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.367669 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7878659675-k6wjl"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.381764 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-k6wjl"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.381939 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.385781 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.434123 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-bqm59"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.436594 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.439586 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454277 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454341 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454372 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454433 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454600 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8pvf\" (UniqueName: \"kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454663 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454687 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454724 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 
08:33:17.454808 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb6wn\" (UniqueName: \"kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.454857 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.461191 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-bqm59"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.513897 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.515196 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.521429 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.521459 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.521646 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-s82g4" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.521732 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.541303 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-k6wjl"] Jan 06 08:33:17 crc kubenswrapper[4784]: E0106 08:33:17.542157 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-wb6wn ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7878659675-k6wjl" podUID="3f44211b-f863-4589-94fa-ab3c7fb12e29" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556253 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556323 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whc8k\" (UniqueName: \"kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556359 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8pvf\" (UniqueName: \"kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " 
pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556410 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556430 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556446 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556477 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556507 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556523 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556597 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb6wn\" (UniqueName: \"kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556642 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556668 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc 
kubenswrapper[4784]: I0106 08:33:17.556696 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556737 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556754 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556768 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.556804 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.557049 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.557371 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.558359 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.558437 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.558655 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.558986 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.573522 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.573623 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.586070 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.594836 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb6wn\" (UniqueName: \"kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn\") pod \"dnsmasq-dns-7878659675-k6wjl\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.603449 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.604874 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.608989 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.611382 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.623239 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8pvf\" (UniqueName: \"kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf\") pod \"ovn-controller-metrics-bqm59\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.631981 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.657640 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.657955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whc8k\" (UniqueName: \"kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658115 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658186 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658368 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658481 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smnl5\" (UniqueName: \"kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658605 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc\") pod 
\"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658688 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658811 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658907 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.658996 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.659098 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.661410 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.661642 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.662223 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.665713 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.667738 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.684703 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whc8k\" (UniqueName: \"kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.692300 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.761310 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smnl5\" (UniqueName: \"kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.761409 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.761438 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.761509 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.761607 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.763905 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.764337 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " 
pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.764588 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.765083 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.774089 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.780050 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smnl5\" (UniqueName: \"kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5\") pod \"dnsmasq-dns-586b989cdc-k2xms\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.842313 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.929396 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:17 crc kubenswrapper[4784]: I0106 08:33:17.952777 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.050016 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.068917 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wb6wn\" (UniqueName: \"kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn\") pod \"3f44211b-f863-4589-94fa-ab3c7fb12e29\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.069093 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config\") pod \"3f44211b-f863-4589-94fa-ab3c7fb12e29\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.069153 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb\") pod \"3f44211b-f863-4589-94fa-ab3c7fb12e29\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.069469 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc\") pod \"3f44211b-f863-4589-94fa-ab3c7fb12e29\" (UID: \"3f44211b-f863-4589-94fa-ab3c7fb12e29\") " Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.070435 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3f44211b-f863-4589-94fa-ab3c7fb12e29" (UID: "3f44211b-f863-4589-94fa-ab3c7fb12e29"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.070443 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config" (OuterVolumeSpecName: "config") pod "3f44211b-f863-4589-94fa-ab3c7fb12e29" (UID: "3f44211b-f863-4589-94fa-ab3c7fb12e29"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.071991 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3f44211b-f863-4589-94fa-ab3c7fb12e29" (UID: "3f44211b-f863-4589-94fa-ab3c7fb12e29"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.080919 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn" (OuterVolumeSpecName: "kube-api-access-wb6wn") pod "3f44211b-f863-4589-94fa-ab3c7fb12e29" (UID: "3f44211b-f863-4589-94fa-ab3c7fb12e29"). InnerVolumeSpecName "kube-api-access-wb6wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.171707 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wb6wn\" (UniqueName: \"kubernetes.io/projected/3f44211b-f863-4589-94fa-ab3c7fb12e29-kube-api-access-wb6wn\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.172100 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.172111 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.172121 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f44211b-f863-4589-94fa-ab3c7fb12e29-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.268960 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-bqm59"] Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.378632 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.481641 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:18 crc kubenswrapper[4784]: W0106 08:33:18.490122 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod394824b4_7aa5_4d50_80a5_44f10276145c.slice/crio-6c4b80863ce11e39cb8f911b8dba0c5bd785bf921bc74a3fe3d158b4ae6c5c92 WatchSource:0}: Error finding container 6c4b80863ce11e39cb8f911b8dba0c5bd785bf921bc74a3fe3d158b4ae6c5c92: Status 404 returned error can't find the container with id 6c4b80863ce11e39cb8f911b8dba0c5bd785bf921bc74a3fe3d158b4ae6c5c92 Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.945774 4784 generic.go:334] "Generic (PLEG): container finished" podID="394824b4-7aa5-4d50-80a5-44f10276145c" containerID="54769a4e7a6de99289a5e1298199227a1e7b248623c581a49b367eccdbd7dd31" exitCode=0 Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.945829 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" event={"ID":"394824b4-7aa5-4d50-80a5-44f10276145c","Type":"ContainerDied","Data":"54769a4e7a6de99289a5e1298199227a1e7b248623c581a49b367eccdbd7dd31"} Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.945875 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" event={"ID":"394824b4-7aa5-4d50-80a5-44f10276145c","Type":"ContainerStarted","Data":"6c4b80863ce11e39cb8f911b8dba0c5bd785bf921bc74a3fe3d158b4ae6c5c92"} Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.948971 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-bqm59" event={"ID":"5b70b310-f1bb-4b3b-b679-9c11f98367ee","Type":"ContainerStarted","Data":"45e04d5527cf93bae10ed29f13197ad0a095715926a5b063cac2ca9387bdb303"} Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.949022 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-bqm59" 
event={"ID":"5b70b310-f1bb-4b3b-b679-9c11f98367ee","Type":"ContainerStarted","Data":"6a5f31a38660a0231fa9ea01ceb4f4e2e4ada05e3d4dfcfd4bc391dccc8a7597"} Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.951405 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerStarted","Data":"3ac7d055a3a2633de96c3ee58e08a4e27636b128faf21d74984d7e2fc3a9e22b"} Jan 06 08:33:18 crc kubenswrapper[4784]: I0106 08:33:18.951715 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-k6wjl" Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.005402 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-bqm59" podStartSLOduration=2.005368001 podStartE2EDuration="2.005368001s" podCreationTimestamp="2026-01-06 08:33:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:18.999727295 +0000 UTC m=+1101.045900142" watchObservedRunningTime="2026-01-06 08:33:19.005368001 +0000 UTC m=+1101.051540858" Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.114072 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-k6wjl"] Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.124132 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7878659675-k6wjl"] Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.962999 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerStarted","Data":"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c"} Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.966435 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" event={"ID":"394824b4-7aa5-4d50-80a5-44f10276145c","Type":"ContainerStarted","Data":"d391534b28d9430ef834ace6055f2618e533853adde95c582b32b777584ddcc9"} Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.966936 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:19 crc kubenswrapper[4784]: I0106 08:33:19.993785 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" podStartSLOduration=2.993770714 podStartE2EDuration="2.993770714s" podCreationTimestamp="2026-01-06 08:33:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:19.989869442 +0000 UTC m=+1102.036042289" watchObservedRunningTime="2026-01-06 08:33:19.993770714 +0000 UTC m=+1102.039943541" Jan 06 08:33:20 crc kubenswrapper[4784]: I0106 08:33:20.327593 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f44211b-f863-4589-94fa-ab3c7fb12e29" path="/var/lib/kubelet/pods/3f44211b-f863-4589-94fa-ab3c7fb12e29/volumes" Jan 06 08:33:20 crc kubenswrapper[4784]: I0106 08:33:20.733643 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 06 08:33:20 crc kubenswrapper[4784]: I0106 08:33:20.975715 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerStarted","Data":"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf"} Jan 06 08:33:20 crc kubenswrapper[4784]: I0106 08:33:20.997068 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.676702382 podStartE2EDuration="3.997049623s" podCreationTimestamp="2026-01-06 08:33:17 +0000 UTC" firstStartedPulling="2026-01-06 08:33:18.387712 +0000 UTC m=+1100.433884857" lastFinishedPulling="2026-01-06 08:33:19.708059251 +0000 UTC m=+1101.754232098" observedRunningTime="2026-01-06 08:33:20.994942217 +0000 UTC m=+1103.041115064" watchObservedRunningTime="2026-01-06 08:33:20.997049623 +0000 UTC m=+1103.043222460" Jan 06 08:33:21 crc kubenswrapper[4784]: I0106 08:33:21.989436 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 06 08:33:24 crc kubenswrapper[4784]: I0106 08:33:24.171438 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 06 08:33:24 crc kubenswrapper[4784]: I0106 08:33:24.172087 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 06 08:33:24 crc kubenswrapper[4784]: I0106 08:33:24.958629 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 06 08:33:25 crc kubenswrapper[4784]: I0106 08:33:25.110480 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" probeResult="failure" output=< Jan 06 08:33:25 crc kubenswrapper[4784]: wsrep_local_state_comment (Joined) differs from Synced Jan 06 08:33:25 crc kubenswrapper[4784]: > Jan 06 08:33:25 crc kubenswrapper[4784]: I0106 08:33:25.563369 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 06 08:33:25 crc kubenswrapper[4784]: I0106 08:33:25.563425 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 06 08:33:25 crc kubenswrapper[4784]: I0106 08:33:25.627598 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 06 08:33:26 crc kubenswrapper[4784]: I0106 08:33:26.098277 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.731371 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.731915 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="dnsmasq-dns" containerID="cri-o://d391534b28d9430ef834ace6055f2618e533853adde95c582b32b777584ddcc9" gracePeriod=10 Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.733803 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.782088 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.783748 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.809765 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.886603 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.886652 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.886778 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csfw5\" (UniqueName: \"kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.886804 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.886824 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.988557 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csfw5\" (UniqueName: \"kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.988652 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.988685 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.988743 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.988775 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.989845 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.989987 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.990097 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:27 crc kubenswrapper[4784]: I0106 08:33:27.990129 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.013072 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csfw5\" (UniqueName: \"kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5\") pod \"dnsmasq-dns-67fdf7998c-5fw7r\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.051590 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.110938 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.432099 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.910269 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.951526 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.951715 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.954144 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.954359 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-dlvcc" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.955529 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 06 08:33:28 crc kubenswrapper[4784]: I0106 08:33:28.955636 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.051352 4784 generic.go:334] "Generic (PLEG): container finished" podID="394824b4-7aa5-4d50-80a5-44f10276145c" containerID="d391534b28d9430ef834ace6055f2618e533853adde95c582b32b777584ddcc9" exitCode=0 Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.051427 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" event={"ID":"394824b4-7aa5-4d50-80a5-44f10276145c","Type":"ContainerDied","Data":"d391534b28d9430ef834ace6055f2618e533853adde95c582b32b777584ddcc9"} Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.053457 4784 generic.go:334] "Generic (PLEG): container finished" podID="90bf95cf-3c80-48fe-874d-5525c724a219" containerID="1353ca0bb2b5d2e016258e6808bc64f6f37476683b15af95e05da9eef1f4381e" exitCode=0 Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.053491 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" event={"ID":"90bf95cf-3c80-48fe-874d-5525c724a219","Type":"ContainerDied","Data":"1353ca0bb2b5d2e016258e6808bc64f6f37476683b15af95e05da9eef1f4381e"} Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.053509 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" event={"ID":"90bf95cf-3c80-48fe-874d-5525c724a219","Type":"ContainerStarted","Data":"c52670684e06f7ba11b3b831b72ecc3bc11c1d431f1a842b162a17652ac98789"} Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.111259 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.111752 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 
08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.111819 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.111863 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.111915 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg6c9\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.214876 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.215056 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.215112 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.215231 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg6c9\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.215274 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.215411 4784 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.215458 4784 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.215522 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift podName:a2bc0281-fc27-4766-87fa-f16599938e96 nodeName:}" failed. 
No retries permitted until 2026-01-06 08:33:29.715499228 +0000 UTC m=+1111.761672245 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift") pod "swift-storage-0" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96") : configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.215952 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.216030 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.216166 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.240594 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg6c9\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.246372 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.324006 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.418241 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smnl5\" (UniqueName: \"kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5\") pod \"394824b4-7aa5-4d50-80a5-44f10276145c\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.418656 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb\") pod \"394824b4-7aa5-4d50-80a5-44f10276145c\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.418697 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config\") pod \"394824b4-7aa5-4d50-80a5-44f10276145c\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.418718 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc\") pod \"394824b4-7aa5-4d50-80a5-44f10276145c\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.418849 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb\") pod \"394824b4-7aa5-4d50-80a5-44f10276145c\" (UID: \"394824b4-7aa5-4d50-80a5-44f10276145c\") " Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.430054 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5" (OuterVolumeSpecName: "kube-api-access-smnl5") pod "394824b4-7aa5-4d50-80a5-44f10276145c" (UID: "394824b4-7aa5-4d50-80a5-44f10276145c"). InnerVolumeSpecName "kube-api-access-smnl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.502289 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "394824b4-7aa5-4d50-80a5-44f10276145c" (UID: "394824b4-7aa5-4d50-80a5-44f10276145c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.507148 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config" (OuterVolumeSpecName: "config") pod "394824b4-7aa5-4d50-80a5-44f10276145c" (UID: "394824b4-7aa5-4d50-80a5-44f10276145c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.511211 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "394824b4-7aa5-4d50-80a5-44f10276145c" (UID: "394824b4-7aa5-4d50-80a5-44f10276145c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.521533 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.521580 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.521589 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.521600 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smnl5\" (UniqueName: \"kubernetes.io/projected/394824b4-7aa5-4d50-80a5-44f10276145c-kube-api-access-smnl5\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.574344 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "394824b4-7aa5-4d50-80a5-44f10276145c" (UID: "394824b4-7aa5-4d50-80a5-44f10276145c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.575323 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-499kt"] Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.575855 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="init" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.575874 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="init" Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.575908 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="dnsmasq-dns" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.575915 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="dnsmasq-dns" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.576071 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" containerName="dnsmasq-dns" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.576631 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.581636 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-499kt"] Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.584534 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.587743 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.587752 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.624375 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394824b4-7aa5-4d50-80a5-44f10276145c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.725767 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.725826 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szsbn\" (UniqueName: \"kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.725876 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.726176 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.726318 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.726390 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.726754 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" 
(UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.726789 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.726939 4784 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.727060 4784 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: E0106 08:33:29.727214 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift podName:a2bc0281-fc27-4766-87fa-f16599938e96 nodeName:}" failed. No retries permitted until 2026-01-06 08:33:30.727185989 +0000 UTC m=+1112.773358816 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift") pod "swift-storage-0" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96") : configmap "swift-ring-files" not found Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829236 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829413 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829475 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829583 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829615 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szsbn\" (UniqueName: \"kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " 
pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829689 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.829769 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.830649 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.831146 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.831900 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.834221 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.835101 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.835439 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.853209 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szsbn\" (UniqueName: \"kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn\") pod \"swift-ring-rebalance-499kt\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:29 crc kubenswrapper[4784]: I0106 08:33:29.896598 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.067140 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" event={"ID":"90bf95cf-3c80-48fe-874d-5525c724a219","Type":"ContainerStarted","Data":"7aa2022b04e1d7e859677179f803d6836738f66967d19b38d26815b72e459e21"} Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.067269 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.069610 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" event={"ID":"394824b4-7aa5-4d50-80a5-44f10276145c","Type":"ContainerDied","Data":"6c4b80863ce11e39cb8f911b8dba0c5bd785bf921bc74a3fe3d158b4ae6c5c92"} Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.069711 4784 scope.go:117] "RemoveContainer" containerID="d391534b28d9430ef834ace6055f2618e533853adde95c582b32b777584ddcc9" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.069721 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-k2xms" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.107708 4784 scope.go:117] "RemoveContainer" containerID="54769a4e7a6de99289a5e1298199227a1e7b248623c581a49b367eccdbd7dd31" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.153416 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" podStartSLOduration=3.153377785 podStartE2EDuration="3.153377785s" podCreationTimestamp="2026-01-06 08:33:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:30.123983659 +0000 UTC m=+1112.170156496" watchObservedRunningTime="2026-01-06 08:33:30.153377785 +0000 UTC m=+1112.199550622" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.188625 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.201814 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-k2xms"] Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.333087 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="394824b4-7aa5-4d50-80a5-44f10276145c" path="/var/lib/kubelet/pods/394824b4-7aa5-4d50-80a5-44f10276145c/volumes" Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.587165 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-499kt"] Jan 06 08:33:30 crc kubenswrapper[4784]: I0106 08:33:30.754913 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:30 crc kubenswrapper[4784]: E0106 08:33:30.755190 4784 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 06 08:33:30 crc kubenswrapper[4784]: E0106 08:33:30.755478 4784 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 06 08:33:30 crc kubenswrapper[4784]: E0106 08:33:30.755587 4784 
Jan 06 08:33:31 crc kubenswrapper[4784]: I0106 08:33:31.081688 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-499kt" event={"ID":"036cffd0-4911-4b85-b573-5aefd8bd124a","Type":"ContainerStarted","Data":"b4d9588ff30afb3c5986e0f469fcf8dfc3132f8cae4369aff9789982f75df1f4"}
Jan 06 08:33:32 crc kubenswrapper[4784]: I0106 08:33:32.801042 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0"
Jan 06 08:33:32 crc kubenswrapper[4784]: E0106 08:33:32.801328 4784 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 06 08:33:32 crc kubenswrapper[4784]: E0106 08:33:32.801641 4784 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 06 08:33:32 crc kubenswrapper[4784]: E0106 08:33:32.801708 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift podName:a2bc0281-fc27-4766-87fa-f16599938e96 nodeName:}" failed. No retries permitted until 2026-01-06 08:33:36.801686711 +0000 UTC m=+1118.847859558 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift") pod "swift-storage-0" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96") : configmap "swift-ring-files" not found
Jan 06 08:33:32 crc kubenswrapper[4784]: I0106 08:33:32.921899 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.298507 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.348634 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-bfn2t"]
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.350068 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bfn2t"
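Editor's note on the repeating etc-swift failure above: swift-storage-0 declares a projected volume that sources the swift-ring-files ConfigMap, which only comes into existence once the swift-ring-rebalance job publishes the rings, so every MountVolume.SetUp attempt before that fails in projected.go and is requeued. The following is a minimal Go sketch of that volume shape, inferred from the `kubernetes.io/projected/...-etc-swift` UniqueName and the configmap error in the log; the field values are illustrative assumptions, not the operator's actual manifest, and it assumes the k8s.io/api module is available.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// etcSwiftVolume sketches the projected volume implied by the log.
// Because ConfigMapProjection.Optional defaults to false, a missing
// "swift-ring-files" ConfigMap makes MountVolume.SetUp fail until the
// rebalance job creates it.
func etcSwiftVolume() corev1.Volume {
	return corev1.Volume{
		Name: "etc-swift",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: "swift-ring-files",
						},
					},
				}},
			},
		},
	}
}

func main() {
	v := etcSwiftVolume()
	fmt.Println(v.Name, v.Projected.Sources[0].ConfigMap.Name)
}
```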
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.354320 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.364440 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-bfn2t"]
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.442668 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.442753 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbxg7\" (UniqueName: \"kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.545816 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.545889 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbxg7\" (UniqueName: \"kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.549310 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.567668 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbxg7\" (UniqueName: \"kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7\") pod \"root-account-create-update-bfn2t\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:34 crc kubenswrapper[4784]: I0106 08:33:34.694316 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.136516 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-499kt" event={"ID":"036cffd0-4911-4b85-b573-5aefd8bd124a","Type":"ContainerStarted","Data":"5c0e38915efc283eeec7d3f028569e5f0d8bb0816bdfbc43c74e9799b2ed11e1"}
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.154754 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-499kt" podStartSLOduration=2.055148777 podStartE2EDuration="6.154733713s" podCreationTimestamp="2026-01-06 08:33:29 +0000 UTC" firstStartedPulling="2026-01-06 08:33:30.595069284 +0000 UTC m=+1112.641242121" lastFinishedPulling="2026-01-06 08:33:34.69465422 +0000 UTC m=+1116.740827057" observedRunningTime="2026-01-06 08:33:35.153195684 +0000 UTC m=+1117.199368521" watchObservedRunningTime="2026-01-06 08:33:35.154733713 +0000 UTC m=+1117.200906550"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.283065 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-bfn2t"]
Jan 06 08:33:35 crc kubenswrapper[4784]: W0106 08:33:35.286477 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode1e6cffd_3296_4d02_8566_5980e357fc22.slice/crio-81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4 WatchSource:0}: Error finding container 81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4: Status 404 returned error can't find the container with id 81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.391116 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-c9v66"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.394727 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.407017 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c9v66"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.539871 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dc75-account-create-update-vmhbs"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.542349 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc75-account-create-update-vmhbs"
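Editor's note on the "Observed pod startup duration" entry for swift-ring-rebalance-499kt above: podStartSLOduration (2.055148777) is the end-to-end startup time minus the image-pull window, which is why it is smaller than podStartE2EDuration (6.154733713s). A minimal Go sketch reproducing that arithmetic from the timestamps in the entry; the helper below is not kubelet's actual implementation, only the relationship between the logged fields.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the log entry above.
	layout := "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-06 08:33:29 +0000 UTC")
	firstPull, _ := time.Parse(layout, "2026-01-06 08:33:30.595069284 +0000 UTC")
	lastPull, _ := time.Parse(layout, "2026-01-06 08:33:34.69465422 +0000 UTC")
	watchRunning, _ := time.Parse(layout, "2026-01-06 08:33:35.154733713 +0000 UTC")

	e2e := watchRunning.Sub(created)     // podStartE2EDuration = 6.154733713s
	pull := lastPull.Sub(firstPull)      // image-pull window  = 4.099584936s
	slo := e2e - pull                    // podStartSLOduration = 2.055148777s
	fmt.Println(e2e, pull, slo)
}
```

For pods that pulled no image (e.g. the dnsmasq entry earlier, where both pull timestamps are the zero time 0001-01-01), the SLO and E2E figures coincide.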
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.546015 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.559517 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc75-account-create-update-vmhbs"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.574415 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.575366 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggbxx\" (UniqueName: \"kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.671950 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-r2bgl"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.677203 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.677288 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx9jv\" (UniqueName: \"kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.677342 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggbxx\" (UniqueName: \"kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.677416 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.678320 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.681803 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-r2bgl"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.691601 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-r2bgl"]
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.724485 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggbxx\" (UniqueName: \"kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx\") pod \"keystone-db-create-c9v66\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.750789 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.779822 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7rr6\" (UniqueName: \"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.780811 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx9jv\" (UniqueName: \"kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.780911 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.781146 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.782478 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.802675 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx9jv\" (UniqueName: \"kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv\") pod \"keystone-dc75-account-create-update-vmhbs\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") " pod="openstack/keystone-dc75-account-create-update-vmhbs"
Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.885215 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7rr6\" (UniqueName: \"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl"
\"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.886862 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.889744 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.897424 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c594-account-create-update-k9b28"] Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.899100 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-k9b28" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.902786 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.919553 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c594-account-create-update-k9b28"] Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.962370 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7rr6\" (UniqueName: \"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") pod \"placement-db-create-r2bgl\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") " pod="openstack/placement-db-create-r2bgl" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.974571 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc75-account-create-update-vmhbs" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.991594 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28" Jan 06 08:33:35 crc kubenswrapper[4784]: I0106 08:33:35.991706 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plfkr\" (UniqueName: \"kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28" Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.001374 4784 util.go:30] "No sandbox for pod can be found. 
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.094761 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plfkr\" (UniqueName: \"kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.094972 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.095810 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.116984 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plfkr\" (UniqueName: \"kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr\") pod \"placement-c594-account-create-update-k9b28\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") " pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.184188 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bfn2t" event={"ID":"e1e6cffd-3296-4d02-8566-5980e357fc22","Type":"ContainerStarted","Data":"6443e9dad677f700ab0c1e5c54226984deea521b5e8b2b7abf11054f95a416e2"}
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.184745 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bfn2t" event={"ID":"e1e6cffd-3296-4d02-8566-5980e357fc22","Type":"ContainerStarted","Data":"81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4"}
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.231185 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-bfn2t" podStartSLOduration=2.231151818 podStartE2EDuration="2.231151818s" podCreationTimestamp="2026-01-06 08:33:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:36.215310394 +0000 UTC m=+1118.261483241" watchObservedRunningTime="2026-01-06 08:33:36.231151818 +0000 UTC m=+1118.277324655"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.237520 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.275152 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c9v66"]
Jan 06 08:33:36 crc kubenswrapper[4784]: W0106 08:33:36.556495 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode64dac25_080f_43cf_9b56_8fca9d178614.slice/crio-b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619 WatchSource:0}: Error finding container b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619: Status 404 returned error can't find the container with id b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.574630 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-r2bgl"]
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.629197 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc75-account-create-update-vmhbs"]
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.750376 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c594-account-create-update-k9b28"]
Jan 06 08:33:36 crc kubenswrapper[4784]: I0106 08:33:36.813229 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0"
Jan 06 08:33:36 crc kubenswrapper[4784]: E0106 08:33:36.813481 4784 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 06 08:33:36 crc kubenswrapper[4784]: E0106 08:33:36.813526 4784 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 06 08:33:36 crc kubenswrapper[4784]: E0106 08:33:36.813618 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift podName:a2bc0281-fc27-4766-87fa-f16599938e96 nodeName:}" failed. No retries permitted until 2026-01-06 08:33:44.813592905 +0000 UTC m=+1126.859765742 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift") pod "swift-storage-0" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96") : configmap "swift-ring-files" not found
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.194301 4784 generic.go:334] "Generic (PLEG): container finished" podID="98272c8d-18ec-4660-9be3-ea08362a5b57" containerID="e1117f40ed13f63cf96a0926c33e046c914afc8710bb6b882701784248e89db6" exitCode=0
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.194382 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9v66" event={"ID":"98272c8d-18ec-4660-9be3-ea08362a5b57","Type":"ContainerDied","Data":"e1117f40ed13f63cf96a0926c33e046c914afc8710bb6b882701784248e89db6"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.194420 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9v66" event={"ID":"98272c8d-18ec-4660-9be3-ea08362a5b57","Type":"ContainerStarted","Data":"d413ea0d33806c4b85522c4f800a2a434dcc48c10876ee41da517725dc0cdc14"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.196439 4784 generic.go:334] "Generic (PLEG): container finished" podID="e1e6cffd-3296-4d02-8566-5980e357fc22" containerID="6443e9dad677f700ab0c1e5c54226984deea521b5e8b2b7abf11054f95a416e2" exitCode=0
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.196500 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bfn2t" event={"ID":"e1e6cffd-3296-4d02-8566-5980e357fc22","Type":"ContainerDied","Data":"6443e9dad677f700ab0c1e5c54226984deea521b5e8b2b7abf11054f95a416e2"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.198474 4784 generic.go:334] "Generic (PLEG): container finished" podID="3c39e426-916f-4f00-927d-fad49789b75e" containerID="f76c7ab82f482cec8a852fe97900303168514f1417b4ff6c7482e190aee111f0" exitCode=0
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.198533 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c594-account-create-update-k9b28" event={"ID":"3c39e426-916f-4f00-927d-fad49789b75e","Type":"ContainerDied","Data":"f76c7ab82f482cec8a852fe97900303168514f1417b4ff6c7482e190aee111f0"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.198596 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c594-account-create-update-k9b28" event={"ID":"3c39e426-916f-4f00-927d-fad49789b75e","Type":"ContainerStarted","Data":"4c181feaed99df5b398a30d7a98085ac98af42213d2b5f12e9d765a63950db0a"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.201241 4784 generic.go:334] "Generic (PLEG): container finished" podID="e64dac25-080f-43cf-9b56-8fca9d178614" containerID="1c912752fe46d84d9ed42c24893228b3462b90a1f34398c91345bd50a4f499de" exitCode=0
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.201311 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-r2bgl" event={"ID":"e64dac25-080f-43cf-9b56-8fca9d178614","Type":"ContainerDied","Data":"1c912752fe46d84d9ed42c24893228b3462b90a1f34398c91345bd50a4f499de"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.201341 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-r2bgl" event={"ID":"e64dac25-080f-43cf-9b56-8fca9d178614","Type":"ContainerStarted","Data":"b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.202912 4784 generic.go:334] "Generic (PLEG): container finished" podID="e0e26028-4d3e-493a-859e-2dd0149d7174" containerID="2da9f9b19996f605daa4e9a95f48246d59861d31537a6b9ba5feac254dd496e7" exitCode=0
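Editor's note on the retry cadence of the etc-swift mount above: the three nestedpendingoperations entries show durationBeforeRetry growing 2s, 4s, 8s, i.e. exponential backoff on a repeatedly failing volume operation. A minimal Go model of that policy, assuming a doubling factor and a cap; the real kubelet tracks this state per operation inside nestedpendingoperations, and the cap value here is an illustrative assumption.

```go
package main

import (
	"fmt"
	"time"
)

// backoff models a per-operation retry delay that doubles on each
// failure, saturating at max.
type backoff struct {
	delay time.Duration
	max   time.Duration
}

// next returns the wait before the following retry and doubles the
// stored delay, clamped to max.
func (b *backoff) next() time.Duration {
	d := b.delay
	b.delay *= 2
	if b.delay > b.max {
		b.delay = b.max
	}
	return d
}

func main() {
	b := &backoff{delay: 2 * time.Second, max: 2 * time.Minute}
	for i := 0; i < 4; i++ {
		fmt.Println(b.next()) // 2s, 4s, 8s, 16s — matching the log's first three retries
	}
}
```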
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.202962 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc75-account-create-update-vmhbs" event={"ID":"e0e26028-4d3e-493a-859e-2dd0149d7174","Type":"ContainerDied","Data":"2da9f9b19996f605daa4e9a95f48246d59861d31537a6b9ba5feac254dd496e7"}
Jan 06 08:33:37 crc kubenswrapper[4784]: I0106 08:33:37.202991 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc75-account-create-update-vmhbs" event={"ID":"e0e26028-4d3e-493a-859e-2dd0149d7174","Type":"ContainerStarted","Data":"6654199ef8be3481ac46e3c98f974196eee664093cc4714437f50efdceebacad"}
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.112758 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r"
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.177915 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"]
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.178215 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="dnsmasq-dns" containerID="cri-o://69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e" gracePeriod=10
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.697352 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.762036 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts\") pod \"3c39e426-916f-4f00-927d-fad49789b75e\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") "
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.762210 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plfkr\" (UniqueName: \"kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr\") pod \"3c39e426-916f-4f00-927d-fad49789b75e\" (UID: \"3c39e426-916f-4f00-927d-fad49789b75e\") "
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.765651 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c39e426-916f-4f00-927d-fad49789b75e" (UID: "3c39e426-916f-4f00-927d-fad49789b75e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.806909 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr" (OuterVolumeSpecName: "kube-api-access-plfkr") pod "3c39e426-916f-4f00-927d-fad49789b75e" (UID: "3c39e426-916f-4f00-927d-fad49789b75e"). InnerVolumeSpecName "kube-api-access-plfkr". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.874031 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plfkr\" (UniqueName: \"kubernetes.io/projected/3c39e426-916f-4f00-927d-fad49789b75e-kube-api-access-plfkr\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:38 crc kubenswrapper[4784]: I0106 08:33:38.874077 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c39e426-916f-4f00-927d-fad49789b75e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.003180 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bfn2t" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.004189 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9v66" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.031339 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.079166 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbxg7\" (UniqueName: \"kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7\") pod \"e1e6cffd-3296-4d02-8566-5980e357fc22\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.079660 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts\") pod \"98272c8d-18ec-4660-9be3-ea08362a5b57\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.079784 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts\") pod \"e1e6cffd-3296-4d02-8566-5980e357fc22\" (UID: \"e1e6cffd-3296-4d02-8566-5980e357fc22\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.080006 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggbxx\" (UniqueName: \"kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx\") pod \"98272c8d-18ec-4660-9be3-ea08362a5b57\" (UID: \"98272c8d-18ec-4660-9be3-ea08362a5b57\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.087872 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "98272c8d-18ec-4660-9be3-ea08362a5b57" (UID: "98272c8d-18ec-4660-9be3-ea08362a5b57"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.095387 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx" (OuterVolumeSpecName: "kube-api-access-ggbxx") pod "98272c8d-18ec-4660-9be3-ea08362a5b57" (UID: "98272c8d-18ec-4660-9be3-ea08362a5b57"). InnerVolumeSpecName "kube-api-access-ggbxx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.112649 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1e6cffd-3296-4d02-8566-5980e357fc22" (UID: "e1e6cffd-3296-4d02-8566-5980e357fc22"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.122235 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7" (OuterVolumeSpecName: "kube-api-access-rbxg7") pod "e1e6cffd-3296-4d02-8566-5980e357fc22" (UID: "e1e6cffd-3296-4d02-8566-5980e357fc22"). InnerVolumeSpecName "kube-api-access-rbxg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.145422 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc75-account-create-update-vmhbs" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.190204 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc\") pod \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.190295 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v9x2\" (UniqueName: \"kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2\") pod \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.190582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config\") pod \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\" (UID: \"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601\") " Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.191116 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbxg7\" (UniqueName: \"kubernetes.io/projected/e1e6cffd-3296-4d02-8566-5980e357fc22-kube-api-access-rbxg7\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.191135 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98272c8d-18ec-4660-9be3-ea08362a5b57-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.191147 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1e6cffd-3296-4d02-8566-5980e357fc22-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.191158 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggbxx\" (UniqueName: \"kubernetes.io/projected/98272c8d-18ec-4660-9be3-ea08362a5b57-kube-api-access-ggbxx\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.208024 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.212844 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2" (OuterVolumeSpecName: "kube-api-access-9v9x2") pod "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" (UID: "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601"). InnerVolumeSpecName "kube-api-access-9v9x2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.235728 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9v66"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.235743 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9v66" event={"ID":"98272c8d-18ec-4660-9be3-ea08362a5b57","Type":"ContainerDied","Data":"d413ea0d33806c4b85522c4f800a2a434dcc48c10876ee41da517725dc0cdc14"}
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.235809 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d413ea0d33806c4b85522c4f800a2a434dcc48c10876ee41da517725dc0cdc14"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.238015 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config" (OuterVolumeSpecName: "config") pod "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" (UID: "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.243654 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" (UID: "75b5fcc5-44e3-4ae8-8e23-5f3654a2f601"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.249427 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bfn2t" event={"ID":"e1e6cffd-3296-4d02-8566-5980e357fc22","Type":"ContainerDied","Data":"81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4"}
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.249480 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81c9f46c2bccdbb2d43db22089334309ca54bb7f5ed0223cd238e28ea16723d4"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.249646 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bfn2t"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.268936 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c594-account-create-update-k9b28" event={"ID":"3c39e426-916f-4f00-927d-fad49789b75e","Type":"ContainerDied","Data":"4c181feaed99df5b398a30d7a98085ac98af42213d2b5f12e9d765a63950db0a"}
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.268994 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c181feaed99df5b398a30d7a98085ac98af42213d2b5f12e9d765a63950db0a"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.269023 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-k9b28"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.293643 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx9jv\" (UniqueName: \"kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv\") pod \"e0e26028-4d3e-493a-859e-2dd0149d7174\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") "
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.293947 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7rr6\" (UniqueName: \"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") pod \"e64dac25-080f-43cf-9b56-8fca9d178614\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") "
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.294012 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts\") pod \"e0e26028-4d3e-493a-859e-2dd0149d7174\" (UID: \"e0e26028-4d3e-493a-859e-2dd0149d7174\") "
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.294092 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts\") pod \"e64dac25-080f-43cf-9b56-8fca9d178614\" (UID: \"e64dac25-080f-43cf-9b56-8fca9d178614\") "
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.294917 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.294933 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v9x2\" (UniqueName: \"kubernetes.io/projected/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-kube-api-access-9v9x2\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.294949 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601-config\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.295686 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e0e26028-4d3e-493a-859e-2dd0149d7174" (UID: "e0e26028-4d3e-493a-859e-2dd0149d7174"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.295730 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e64dac25-080f-43cf-9b56-8fca9d178614" (UID: "e64dac25-080f-43cf-9b56-8fca9d178614"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.302891 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6" (OuterVolumeSpecName: "kube-api-access-j7rr6") pod "e64dac25-080f-43cf-9b56-8fca9d178614" (UID: "e64dac25-080f-43cf-9b56-8fca9d178614"). InnerVolumeSpecName "kube-api-access-j7rr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.303482 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv" (OuterVolumeSpecName: "kube-api-access-jx9jv") pod "e0e26028-4d3e-493a-859e-2dd0149d7174" (UID: "e0e26028-4d3e-493a-859e-2dd0149d7174"). InnerVolumeSpecName "kube-api-access-jx9jv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.304032 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-r2bgl" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.304190 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-r2bgl" event={"ID":"e64dac25-080f-43cf-9b56-8fca9d178614","Type":"ContainerDied","Data":"b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619"} Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.304227 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b94b6760d91f6450f52cfc0110ff474694979c05eff505305985239d72202619" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.312083 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc75-account-create-update-vmhbs" event={"ID":"e0e26028-4d3e-493a-859e-2dd0149d7174","Type":"ContainerDied","Data":"6654199ef8be3481ac46e3c98f974196eee664093cc4714437f50efdceebacad"} Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.312138 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6654199ef8be3481ac46e3c98f974196eee664093cc4714437f50efdceebacad" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.312227 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.316337 4784 generic.go:334] "Generic (PLEG): container finished" podID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerID="69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e" exitCode=0
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.316475 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerDied","Data":"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"}
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.316645 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz" event={"ID":"75b5fcc5-44e3-4ae8-8e23-5f3654a2f601","Type":"ContainerDied","Data":"45a4e6b5dbe27d7dac2c075f1621242b64c0bb214bf82bcef116327204ccfe02"}
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.316736 4784 scope.go:117] "RemoveContainer" containerID="69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.316979 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-mn8nz"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.368436 4784 scope.go:117] "RemoveContainer" containerID="671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.389670 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"]
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.396673 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64dac25-080f-43cf-9b56-8fca9d178614-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.398333 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx9jv\" (UniqueName: \"kubernetes.io/projected/e0e26028-4d3e-493a-859e-2dd0149d7174-kube-api-access-jx9jv\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.398430 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7rr6\" (UniqueName: \"kubernetes.io/projected/e64dac25-080f-43cf-9b56-8fca9d178614-kube-api-access-j7rr6\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.398306 4784 scope.go:117] "RemoveContainer" containerID="69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.398502 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e26028-4d3e-493a-859e-2dd0149d7174-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.397969 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-mn8nz"]
Jan 06 08:33:39 crc kubenswrapper[4784]: E0106 08:33:39.399293 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e\": container with ID starting with 69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e not found: ID does not exist" containerID="69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"
containerID="69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.399338 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e"} err="failed to get container status \"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e\": rpc error: code = NotFound desc = could not find container \"69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e\": container with ID starting with 69fa32b0be31bf4c9776792cba40f8ee5098ba12abaa2327064fb250193add1e not found: ID does not exist" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.399373 4784 scope.go:117] "RemoveContainer" containerID="671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753" Jan 06 08:33:39 crc kubenswrapper[4784]: E0106 08:33:39.399947 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753\": container with ID starting with 671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753 not found: ID does not exist" containerID="671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753" Jan 06 08:33:39 crc kubenswrapper[4784]: I0106 08:33:39.400102 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753"} err="failed to get container status \"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753\": rpc error: code = NotFound desc = could not find container \"671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753\": container with ID starting with 671dbde473e8bb8f9e2f26df0474c6ced5a4a73e338b44933666a5404a42b753 not found: ID does not exist" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.325779 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" path="/var/lib/kubelet/pods/75b5fcc5-44e3-4ae8-8e23-5f3654a2f601/volumes" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.921824 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-6x9vm"] Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922398 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0e26028-4d3e-493a-859e-2dd0149d7174" containerName="mariadb-account-create-update" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922431 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0e26028-4d3e-493a-859e-2dd0149d7174" containerName="mariadb-account-create-update" Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922464 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="init" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922477 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="init" Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922499 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98272c8d-18ec-4660-9be3-ea08362a5b57" containerName="mariadb-database-create" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922511 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="98272c8d-18ec-4660-9be3-ea08362a5b57" containerName="mariadb-database-create" Jan 06 08:33:40 crc 
Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922533 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c39e426-916f-4f00-927d-fad49789b75e" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922659 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c39e426-916f-4f00-927d-fad49789b75e" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922699 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64dac25-080f-43cf-9b56-8fca9d178614" containerName="mariadb-database-create"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922712 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64dac25-080f-43cf-9b56-8fca9d178614" containerName="mariadb-database-create"
Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922740 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e6cffd-3296-4d02-8566-5980e357fc22" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922751 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e6cffd-3296-4d02-8566-5980e357fc22" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: E0106 08:33:40.922784 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="dnsmasq-dns"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.922797 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="dnsmasq-dns"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923062 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e6cffd-3296-4d02-8566-5980e357fc22" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923090 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c39e426-916f-4f00-927d-fad49789b75e" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923104 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0e26028-4d3e-493a-859e-2dd0149d7174" containerName="mariadb-account-create-update"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923133 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="75b5fcc5-44e3-4ae8-8e23-5f3654a2f601" containerName="dnsmasq-dns"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923149 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="98272c8d-18ec-4660-9be3-ea08362a5b57" containerName="mariadb-database-create"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.923173 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e64dac25-080f-43cf-9b56-8fca9d178614" containerName="mariadb-database-create"
Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.924208 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6x9vm"
Need to start a new one" pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:40 crc kubenswrapper[4784]: I0106 08:33:40.933312 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6x9vm"] Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.029925 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.030235 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szhrz\" (UniqueName: \"kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.127585 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-28c1-account-create-update-fnqkv"] Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.129530 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.132613 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.132847 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szhrz\" (UniqueName: \"kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.133426 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.133750 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.136901 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-28c1-account-create-update-fnqkv"] Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.182087 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szhrz\" (UniqueName: \"kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz\") pod \"glance-db-create-6x9vm\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.235299 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts\") pod 
\"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.235391 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt2gl\" (UniqueName: \"kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl\") pod \"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.255127 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.338250 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts\") pod \"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.338328 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt2gl\" (UniqueName: \"kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl\") pod \"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.339238 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts\") pod \"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.359249 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt2gl\" (UniqueName: \"kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl\") pod \"glance-28c1-account-create-update-fnqkv\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.451468 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.804036 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6x9vm"] Jan 06 08:33:41 crc kubenswrapper[4784]: W0106 08:33:41.968015 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4311b21_f94c_4300_aaa2_dd1cea584334.slice/crio-b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d WatchSource:0}: Error finding container b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d: Status 404 returned error can't find the container with id b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d Jan 06 08:33:41 crc kubenswrapper[4784]: I0106 08:33:41.969259 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-28c1-account-create-update-fnqkv"] Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.374647 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-28c1-account-create-update-fnqkv" event={"ID":"b4311b21-f94c-4300-aaa2-dd1cea584334","Type":"ContainerStarted","Data":"b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d"} Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.379688 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6x9vm" event={"ID":"1c994335-9c58-474f-8192-040dfb912747","Type":"ContainerStarted","Data":"d7d99f6e0d7bff947f92366dfc08c9c548a7c8b06a7d668594a1f293286f3f14"} Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.379743 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6x9vm" event={"ID":"1c994335-9c58-474f-8192-040dfb912747","Type":"ContainerStarted","Data":"88dce2a65b99fa5f2de355cb88b4acf7b42172ed39973bba845b395d9a8d3372"} Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.401348 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-6x9vm" podStartSLOduration=2.401322011 podStartE2EDuration="2.401322011s" podCreationTimestamp="2026-01-06 08:33:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:42.399813124 +0000 UTC m=+1124.445985961" watchObservedRunningTime="2026-01-06 08:33:42.401322011 +0000 UTC m=+1124.447494848" Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.819681 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-bfn2t"] Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.826238 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-bfn2t"] Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.895126 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-fg6t2"] Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.896871 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.900442 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 06 08:33:42 crc kubenswrapper[4784]: I0106 08:33:42.903602 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fg6t2"] Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.005720 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.006369 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h6dd\" (UniqueName: \"kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.108826 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.109426 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h6dd\" (UniqueName: \"kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.109917 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.135567 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h6dd\" (UniqueName: \"kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd\") pod \"root-account-create-update-fg6t2\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.228152 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.393391 4784 generic.go:334] "Generic (PLEG): container finished" podID="1c994335-9c58-474f-8192-040dfb912747" containerID="d7d99f6e0d7bff947f92366dfc08c9c548a7c8b06a7d668594a1f293286f3f14" exitCode=0 Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.393948 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6x9vm" event={"ID":"1c994335-9c58-474f-8192-040dfb912747","Type":"ContainerDied","Data":"d7d99f6e0d7bff947f92366dfc08c9c548a7c8b06a7d668594a1f293286f3f14"} Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.396941 4784 generic.go:334] "Generic (PLEG): container finished" podID="036cffd0-4911-4b85-b573-5aefd8bd124a" containerID="5c0e38915efc283eeec7d3f028569e5f0d8bb0816bdfbc43c74e9799b2ed11e1" exitCode=0 Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.397003 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-499kt" event={"ID":"036cffd0-4911-4b85-b573-5aefd8bd124a","Type":"ContainerDied","Data":"5c0e38915efc283eeec7d3f028569e5f0d8bb0816bdfbc43c74e9799b2ed11e1"} Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.400200 4784 generic.go:334] "Generic (PLEG): container finished" podID="b4311b21-f94c-4300-aaa2-dd1cea584334" containerID="474a03023859df3ab17cd19508195c766002aabe9d1bfb5bf14278ae49ecd12b" exitCode=0 Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.400274 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-28c1-account-create-update-fnqkv" event={"ID":"b4311b21-f94c-4300-aaa2-dd1cea584334","Type":"ContainerDied","Data":"474a03023859df3ab17cd19508195c766002aabe9d1bfb5bf14278ae49ecd12b"} Jan 06 08:33:43 crc kubenswrapper[4784]: I0106 08:33:43.706693 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fg6t2"] Jan 06 08:33:43 crc kubenswrapper[4784]: W0106 08:33:43.707379 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cbba6e7_1e02_4788_9e12_00c5c3391a86.slice/crio-e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121 WatchSource:0}: Error finding container e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121: Status 404 returned error can't find the container with id e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121 Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.326365 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1e6cffd-3296-4d02-8566-5980e357fc22" path="/var/lib/kubelet/pods/e1e6cffd-3296-4d02-8566-5980e357fc22/volumes" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.421954 4784 generic.go:334] "Generic (PLEG): container finished" podID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerID="bd03e04330f8e1c997eb9c5a6519ec44ddc8665c10bea822cf494fdb01acc628" exitCode=0 Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.422082 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerDied","Data":"bd03e04330f8e1c997eb9c5a6519ec44ddc8665c10bea822cf494fdb01acc628"} Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.428199 4784 generic.go:334] "Generic (PLEG): container finished" podID="7cbba6e7-1e02-4788-9e12-00c5c3391a86" containerID="650817aab5852a278d03cc46d30381a8b5a0cbac71aad2d2e3d4d2f3c23bf6bd" 
exitCode=0 Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.428278 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fg6t2" event={"ID":"7cbba6e7-1e02-4788-9e12-00c5c3391a86","Type":"ContainerDied","Data":"650817aab5852a278d03cc46d30381a8b5a0cbac71aad2d2e3d4d2f3c23bf6bd"} Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.428307 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fg6t2" event={"ID":"7cbba6e7-1e02-4788-9e12-00c5c3391a86","Type":"ContainerStarted","Data":"e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121"} Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.850644 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.859246 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"swift-storage-0\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " pod="openstack/swift-storage-0" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.870486 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.874446 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.889850 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:44 crc kubenswrapper[4784]: I0106 08:33:44.900050 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056161 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056254 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts\") pod \"b4311b21-f94c-4300-aaa2-dd1cea584334\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056294 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056334 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szsbn\" (UniqueName: \"kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056386 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szhrz\" (UniqueName: \"kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz\") pod \"1c994335-9c58-474f-8192-040dfb912747\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056451 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056511 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts\") pod \"1c994335-9c58-474f-8192-040dfb912747\" (UID: \"1c994335-9c58-474f-8192-040dfb912747\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056603 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056692 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056736 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pt2gl\" (UniqueName: 
\"kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl\") pod \"b4311b21-f94c-4300-aaa2-dd1cea584334\" (UID: \"b4311b21-f94c-4300-aaa2-dd1cea584334\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.056780 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift\") pod \"036cffd0-4911-4b85-b573-5aefd8bd124a\" (UID: \"036cffd0-4911-4b85-b573-5aefd8bd124a\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.059219 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.060275 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1c994335-9c58-474f-8192-040dfb912747" (UID: "1c994335-9c58-474f-8192-040dfb912747"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.061233 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b4311b21-f94c-4300-aaa2-dd1cea584334" (UID: "b4311b21-f94c-4300-aaa2-dd1cea584334"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.062259 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.064391 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl" (OuterVolumeSpecName: "kube-api-access-pt2gl") pod "b4311b21-f94c-4300-aaa2-dd1cea584334" (UID: "b4311b21-f94c-4300-aaa2-dd1cea584334"). InnerVolumeSpecName "kube-api-access-pt2gl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.068131 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz" (OuterVolumeSpecName: "kube-api-access-szhrz") pod "1c994335-9c58-474f-8192-040dfb912747" (UID: "1c994335-9c58-474f-8192-040dfb912747"). InnerVolumeSpecName "kube-api-access-szhrz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.069027 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.069801 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn" (OuterVolumeSpecName: "kube-api-access-szsbn") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "kube-api-access-szsbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.083841 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts" (OuterVolumeSpecName: "scripts") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.086308 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.101744 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "036cffd0-4911-4b85-b573-5aefd8bd124a" (UID: "036cffd0-4911-4b85-b573-5aefd8bd124a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159403 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c994335-9c58-474f-8192-040dfb912747-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159502 4784 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159518 4784 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159533 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pt2gl\" (UniqueName: \"kubernetes.io/projected/b4311b21-f94c-4300-aaa2-dd1cea584334-kube-api-access-pt2gl\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159564 4784 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/036cffd0-4911-4b85-b573-5aefd8bd124a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159577 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159591 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4311b21-f94c-4300-aaa2-dd1cea584334-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159604 4784 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/036cffd0-4911-4b85-b573-5aefd8bd124a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159619 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szsbn\" (UniqueName: \"kubernetes.io/projected/036cffd0-4911-4b85-b573-5aefd8bd124a-kube-api-access-szsbn\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159632 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szhrz\" (UniqueName: \"kubernetes.io/projected/1c994335-9c58-474f-8192-040dfb912747-kube-api-access-szhrz\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.159644 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036cffd0-4911-4b85-b573-5aefd8bd124a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.447110 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-499kt" event={"ID":"036cffd0-4911-4b85-b573-5aefd8bd124a","Type":"ContainerDied","Data":"b4d9588ff30afb3c5986e0f469fcf8dfc3132f8cae4369aff9789982f75df1f4"} Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.447188 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4d9588ff30afb3c5986e0f469fcf8dfc3132f8cae4369aff9789982f75df1f4" Jan 
06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.447327 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-499kt" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.450406 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-28c1-account-create-update-fnqkv" event={"ID":"b4311b21-f94c-4300-aaa2-dd1cea584334","Type":"ContainerDied","Data":"b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d"} Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.450463 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b993b63cf0fe2d70151480d7e59c9395a55de0bb90c6d3e46779c597d2473f1d" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.450562 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-28c1-account-create-update-fnqkv" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.467494 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerStarted","Data":"d32e01d069e2a7fe432e20265968b48ead1ba6a001b6421c4e55bfdf12b10616"} Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.470373 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.472385 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6x9vm" event={"ID":"1c994335-9c58-474f-8192-040dfb912747","Type":"ContainerDied","Data":"88dce2a65b99fa5f2de355cb88b4acf7b42172ed39973bba845b395d9a8d3372"} Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.472448 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88dce2a65b99fa5f2de355cb88b4acf7b42172ed39973bba845b395d9a8d3372" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.472593 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-6x9vm" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.498635 4784 generic.go:334] "Generic (PLEG): container finished" podID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerID="613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f" exitCode=0 Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.498919 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerDied","Data":"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"} Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.542997 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=56.321939568 podStartE2EDuration="1m4.542930505s" podCreationTimestamp="2026-01-06 08:32:41 +0000 UTC" firstStartedPulling="2026-01-06 08:33:01.147737105 +0000 UTC m=+1083.193909942" lastFinishedPulling="2026-01-06 08:33:09.368728042 +0000 UTC m=+1091.414900879" observedRunningTime="2026-01-06 08:33:45.533429469 +0000 UTC m=+1127.579602306" watchObservedRunningTime="2026-01-06 08:33:45.542930505 +0000 UTC m=+1127.589103352" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.568094 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 06 08:33:45 crc kubenswrapper[4784]: E0106 08:33:45.805120 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod036cffd0_4911_4b85_b573_5aefd8bd124a.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c994335_9c58_474f_8192_040dfb912747.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c994335_9c58_474f_8192_040dfb912747.slice/crio-88dce2a65b99fa5f2de355cb88b4acf7b42172ed39973bba845b395d9a8d3372\": RecentStats: unable to find data in memory cache]" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.814029 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.979284 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts\") pod \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.979378 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h6dd\" (UniqueName: \"kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd\") pod \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\" (UID: \"7cbba6e7-1e02-4788-9e12-00c5c3391a86\") " Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.980343 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7cbba6e7-1e02-4788-9e12-00c5c3391a86" (UID: "7cbba6e7-1e02-4788-9e12-00c5c3391a86"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:33:45 crc kubenswrapper[4784]: I0106 08:33:45.984877 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd" (OuterVolumeSpecName: "kube-api-access-8h6dd") pod "7cbba6e7-1e02-4788-9e12-00c5c3391a86" (UID: "7cbba6e7-1e02-4788-9e12-00c5c3391a86"). InnerVolumeSpecName "kube-api-access-8h6dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.082254 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h6dd\" (UniqueName: \"kubernetes.io/projected/7cbba6e7-1e02-4788-9e12-00c5c3391a86-kube-api-access-8h6dd\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.082304 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cbba6e7-1e02-4788-9e12-00c5c3391a86-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.251143 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-8tvjg" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" probeResult="failure" output=< Jan 06 08:33:46 crc kubenswrapper[4784]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 06 08:33:46 crc kubenswrapper[4784]: > Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.269959 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-vwmz9"] Jan 06 08:33:46 crc kubenswrapper[4784]: E0106 08:33:46.270635 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cbba6e7-1e02-4788-9e12-00c5c3391a86" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.270667 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cbba6e7-1e02-4788-9e12-00c5c3391a86" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: E0106 08:33:46.270685 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c994335-9c58-474f-8192-040dfb912747" containerName="mariadb-database-create" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.270694 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c994335-9c58-474f-8192-040dfb912747" containerName="mariadb-database-create" Jan 06 08:33:46 crc kubenswrapper[4784]: E0106 08:33:46.270708 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036cffd0-4911-4b85-b573-5aefd8bd124a" containerName="swift-ring-rebalance" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.270725 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="036cffd0-4911-4b85-b573-5aefd8bd124a" containerName="swift-ring-rebalance" Jan 06 08:33:46 crc kubenswrapper[4784]: E0106 08:33:46.270744 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4311b21-f94c-4300-aaa2-dd1cea584334" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.270751 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4311b21-f94c-4300-aaa2-dd1cea584334" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.271029 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="036cffd0-4911-4b85-b573-5aefd8bd124a" containerName="swift-ring-rebalance" Jan 06 
08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.271065 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c994335-9c58-474f-8192-040dfb912747" containerName="mariadb-database-create" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.271081 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4311b21-f94c-4300-aaa2-dd1cea584334" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.271094 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cbba6e7-1e02-4788-9e12-00c5c3391a86" containerName="mariadb-account-create-update" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.271934 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.275120 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.279866 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vwmz9"] Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.280188 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-8kx7t" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.281409 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.290193 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.389187 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.389323 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sljff\" (UniqueName: \"kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.389405 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.389491 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.508804 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle\") pod \"glance-db-sync-vwmz9\" (UID: 
\"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.509312 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.509435 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sljff\" (UniqueName: \"kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.509560 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.527571 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.531304 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.561027 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8tvjg-config-gn8v4"] Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.563044 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.564167 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sljff\" (UniqueName: \"kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff\") pod \"glance-db-sync-vwmz9\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.565879 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.589108 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.598187 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg-config-gn8v4"] Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.598768 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-vwmz9" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.599019 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerStarted","Data":"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"} Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.599626 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.602893 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"43d68d465367327138f9318b678acd553a47ebe35728cf4ca49b45e2b8308e3c"} Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.608104 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fg6t2" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.608379 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fg6t2" event={"ID":"7cbba6e7-1e02-4788-9e12-00c5c3391a86","Type":"ContainerDied","Data":"e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121"} Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.608421 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e84081bf38f94a50bca38257a37b446ee9b6bb68651bef4e372ac7b9b9432121" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.662841 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=55.082847522 podStartE2EDuration="1m5.662814955s" podCreationTimestamp="2026-01-06 08:32:41 +0000 UTC" firstStartedPulling="2026-01-06 08:32:59.025159601 +0000 UTC m=+1081.071332438" lastFinishedPulling="2026-01-06 08:33:09.605127034 +0000 UTC m=+1091.651299871" observedRunningTime="2026-01-06 08:33:46.643784472 +0000 UTC m=+1128.689957309" watchObservedRunningTime="2026-01-06 08:33:46.662814955 +0000 UTC m=+1128.708987792" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.713431 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.713645 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.713713 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5jkh\" (UniqueName: \"kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.713775 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.713979 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.714791 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.819300 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.819859 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.819919 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.820007 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.820041 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.820092 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5jkh\" (UniqueName: \"kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4" Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 
08:33:46.820129 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.820328 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.820378 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.821318 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.822307 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.843505 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5jkh\" (UniqueName: \"kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh\") pod \"ovn-controller-8tvjg-config-gn8v4\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") " pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:46 crc kubenswrapper[4784]: I0106 08:33:46.944428 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.247591 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vwmz9"]
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.557886 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg-config-gn8v4"]
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.617358 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vwmz9" event={"ID":"81b4221b-0db4-4391-a152-951dbe6700cb","Type":"ContainerStarted","Data":"b26f9e7256d8c19045f06c665ba462a488073f7d2ed9bf32c9fe4bd85b3ac705"}
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.623714 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"353a0302ae512e9895ae799f830966d4415b4ddf7909c1b5ffbcba497511d1de"}
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.623771 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"6113f42bede3a27a0cb54b6277716f1e062747b6f20d0f6170df3e915df36563"}
Jan 06 08:33:47 crc kubenswrapper[4784]: I0106 08:33:47.625411 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-gn8v4" event={"ID":"b7313b11-bbcf-4ffe-894c-ff0a0937918a","Type":"ContainerStarted","Data":"464dbab06164c16117933ea3e09917d2f8d37c72656dd0e98d8450fcb6638869"}
Jan 06 08:33:48 crc kubenswrapper[4784]: I0106 08:33:48.641092 4784 generic.go:334] "Generic (PLEG): container finished" podID="b7313b11-bbcf-4ffe-894c-ff0a0937918a" containerID="03a6685581a4e19c9381b6823b522e80b5572a31d8881d1df2fd2be3a6a2817e" exitCode=0
Jan 06 08:33:48 crc kubenswrapper[4784]: I0106 08:33:48.641220 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-gn8v4" event={"ID":"b7313b11-bbcf-4ffe-894c-ff0a0937918a","Type":"ContainerDied","Data":"03a6685581a4e19c9381b6823b522e80b5572a31d8881d1df2fd2be3a6a2817e"}
Jan 06 08:33:48 crc kubenswrapper[4784]: I0106 08:33:48.653666 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"4e841199dad3d57bec03b6ab32443378e9a39fb3254ba456545d280228b18564"}
Jan 06 08:33:48 crc kubenswrapper[4784]: I0106 08:33:48.653737 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"0410f39e8c4c8bc197844907a33893de7c643c3e9ad4fa4c7538fe09ef8e89e5"}
Jan 06 08:33:49 crc kubenswrapper[4784]: I0106 08:33:49.668667 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"bf572454575eb77381e725ef9250e47418c50e419d4f2e3931a28d2c8d07717d"}
Jan 06 08:33:49 crc kubenswrapper[4784]: I0106 08:33:49.669410 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"869eb46b39bba54be94fbd147453143836362ba61706362d4c5f22a8bd537f78"}
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.133469 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185300 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185486 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185633 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185617 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185662 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185693 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185731 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5jkh\" (UniqueName: \"kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.185884 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run\") pod \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\" (UID: \"b7313b11-bbcf-4ffe-894c-ff0a0937918a\") "
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.186310 4784 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.186330 4784 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.186384 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run" (OuterVolumeSpecName: "var-run") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.187039 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.188814 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts" (OuterVolumeSpecName: "scripts") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.201360 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh" (OuterVolumeSpecName: "kube-api-access-k5jkh") pod "b7313b11-bbcf-4ffe-894c-ff0a0937918a" (UID: "b7313b11-bbcf-4ffe-894c-ff0a0937918a"). InnerVolumeSpecName "kube-api-access-k5jkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.287952 4784 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.287994 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b7313b11-bbcf-4ffe-894c-ff0a0937918a-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.288006 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5jkh\" (UniqueName: \"kubernetes.io/projected/b7313b11-bbcf-4ffe-894c-ff0a0937918a-kube-api-access-k5jkh\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.288020 4784 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b7313b11-bbcf-4ffe-894c-ff0a0937918a-var-run\") on node \"crc\" DevicePath \"\""
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.683684 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"fdfc98f3c2d5c499ec740f96d17f27d1a06a4729300bbfc140e64fc4172f8f42"}
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.683776 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"26e897885f00517a75035b5f5164eb8d210634e06114a25a61554dd7abccebce"}
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.686232 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-gn8v4" event={"ID":"b7313b11-bbcf-4ffe-894c-ff0a0937918a","Type":"ContainerDied","Data":"464dbab06164c16117933ea3e09917d2f8d37c72656dd0e98d8450fcb6638869"}
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.686278 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="464dbab06164c16117933ea3e09917d2f8d37c72656dd0e98d8450fcb6638869"
Jan 06 08:33:50 crc kubenswrapper[4784]: I0106 08:33:50.686334 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-gn8v4"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.249141 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-8tvjg"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.251136 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8tvjg-config-gn8v4"]
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.263765 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8tvjg-config-gn8v4"]
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.402019 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8tvjg-config-vzv5b"]
Jan 06 08:33:51 crc kubenswrapper[4784]: E0106 08:33:51.402598 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7313b11-bbcf-4ffe-894c-ff0a0937918a" containerName="ovn-config"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.402625 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7313b11-bbcf-4ffe-894c-ff0a0937918a" containerName="ovn-config"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.402857 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7313b11-bbcf-4ffe-894c-ff0a0937918a" containerName="ovn-config"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.403666 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.406066 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.420834 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg-config-vzv5b"]
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512445 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2twc\" (UniqueName: \"kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512530 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512625 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512683 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512788 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.512940 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615188 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2twc\" (UniqueName: \"kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615269 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615317 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615364 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615381 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615416 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615748 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.615772 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.616157 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.618097 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.618175 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.636619 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2twc\" (UniqueName: \"kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc\") pod \"ovn-controller-8tvjg-config-vzv5b\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") " pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:51 crc kubenswrapper[4784]: I0106 08:33:51.735135 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.154899 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8tvjg-config-vzv5b"]
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.324922 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7313b11-bbcf-4ffe-894c-ff0a0937918a" path="/var/lib/kubelet/pods/b7313b11-bbcf-4ffe-894c-ff0a0937918a/volumes"
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.725109 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"2782e4e954d402d8644c704c14fc8b38760f649dd39ca5a39f52b8e5a86c03a1"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.725665 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"c87da68debafaf9f2a7c72f8afee9751c29efd20b0a9f522e25b46cbb9829297"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.725683 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"7d3b4c93b777e722f9e32993854eee837ecfe60db1b929d1c3283452b9fc478a"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.725697 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"048d0fdb57c0dd31a395544856c4165cd53c4598bcc0c60dcd03e55e8e8cb6bd"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.728491 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-vzv5b" event={"ID":"e0e826e9-2dbc-4d49-96c2-e21265a9907a","Type":"ContainerStarted","Data":"2c45ac306d7abdef48f3e6ee949c4ec7dc07bfed1e934d3efbf3a711946837e2"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.728524 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-vzv5b" event={"ID":"e0e826e9-2dbc-4d49-96c2-e21265a9907a","Type":"ContainerStarted","Data":"e4b858c5896578bc4a3fe57ed36bd37217838ecc43af9d64e1524307d366b7b0"}
Jan 06 08:33:52 crc kubenswrapper[4784]: I0106 08:33:52.763037 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8tvjg-config-vzv5b" podStartSLOduration=1.763017478 podStartE2EDuration="1.763017478s" podCreationTimestamp="2026-01-06 08:33:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:33:52.76145957 +0000 UTC m=+1134.807632407" watchObservedRunningTime="2026-01-06 08:33:52.763017478 +0000 UTC m=+1134.809190315"
Jan 06 08:33:53 crc kubenswrapper[4784]: I0106 08:33:53.739609 4784 generic.go:334] "Generic (PLEG): container finished" podID="e0e826e9-2dbc-4d49-96c2-e21265a9907a" containerID="2c45ac306d7abdef48f3e6ee949c4ec7dc07bfed1e934d3efbf3a711946837e2" exitCode=0
Jan 06 08:33:53 crc kubenswrapper[4784]: I0106 08:33:53.739757 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-vzv5b" event={"ID":"e0e826e9-2dbc-4d49-96c2-e21265a9907a","Type":"ContainerDied","Data":"2c45ac306d7abdef48f3e6ee949c4ec7dc07bfed1e934d3efbf3a711946837e2"}
Jan 06 08:33:55 crc kubenswrapper[4784]: I0106 08:33:55.766517 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"92c958d9eb2eef729c21a59d1aa5cd51f0a60f0eee60721df4067e8956812f0d"}
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.159374 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311424 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311555 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311637 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311695 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311725 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run" (OuterVolumeSpecName: "var-run") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.311794 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2twc\" (UniqueName: \"kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.312683 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.312779 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.312874 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts\") pod \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\" (UID: \"e0e826e9-2dbc-4d49-96c2-e21265a9907a\") "
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.312900 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.313437 4784 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.313508 4784 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.313575 4784 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.313593 4784 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e0e826e9-2dbc-4d49-96c2-e21265a9907a-var-run\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.314331 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts" (OuterVolumeSpecName: "scripts") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.316238 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc" (OuterVolumeSpecName: "kube-api-access-d2twc") pod "e0e826e9-2dbc-4d49-96c2-e21265a9907a" (UID: "e0e826e9-2dbc-4d49-96c2-e21265a9907a"). InnerVolumeSpecName "kube-api-access-d2twc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.415836 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0e826e9-2dbc-4d49-96c2-e21265a9907a-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.415868 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2twc\" (UniqueName: \"kubernetes.io/projected/e0e826e9-2dbc-4d49-96c2-e21265a9907a-kube-api-access-d2twc\") on node \"crc\" DevicePath \"\""
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.833436 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"47cf27a1d579d06ca4f6c0124e1a7218eaa708d4b9f10e03cf5124c1b88d16b7"}
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.833963 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerStarted","Data":"4ba7f085163f761a926fc23b3df8baf41bed014e786fa75f064425ec412d6aac"}
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.839186 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vwmz9" event={"ID":"81b4221b-0db4-4391-a152-951dbe6700cb","Type":"ContainerStarted","Data":"9ec8b34270c8cb61ca7735cf44513c82b7b7b7a7970712874d5239863d652840"}
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.841041 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg-config-vzv5b" event={"ID":"e0e826e9-2dbc-4d49-96c2-e21265a9907a","Type":"ContainerDied","Data":"e4b858c5896578bc4a3fe57ed36bd37217838ecc43af9d64e1524307d366b7b0"}
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.841076 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4b858c5896578bc4a3fe57ed36bd37217838ecc43af9d64e1524307d366b7b0"
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.841113 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg-config-vzv5b"
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.877380 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=28.924909133 podStartE2EDuration="34.877347059s" podCreationTimestamp="2026-01-06 08:33:27 +0000 UTC" firstStartedPulling="2026-01-06 08:33:45.593901784 +0000 UTC m=+1127.640074621" lastFinishedPulling="2026-01-06 08:33:51.54633971 +0000 UTC m=+1133.592512547" observedRunningTime="2026-01-06 08:34:01.86805317 +0000 UTC m=+1143.914226047" watchObservedRunningTime="2026-01-06 08:34:01.877347059 +0000 UTC m=+1143.923519916"
Jan 06 08:34:01 crc kubenswrapper[4784]: I0106 08:34:01.906555 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-vwmz9" podStartSLOduration=1.940120103 podStartE2EDuration="15.906515969s" podCreationTimestamp="2026-01-06 08:33:46 +0000 UTC" firstStartedPulling="2026-01-06 08:33:47.266811373 +0000 UTC m=+1129.312984210" lastFinishedPulling="2026-01-06 08:34:01.233207239 +0000 UTC m=+1143.279380076" observedRunningTime="2026-01-06 08:34:01.900340676 +0000 UTC m=+1143.946513533" watchObservedRunningTime="2026-01-06 08:34:01.906515969 +0000 UTC m=+1143.952688806"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.162311 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"]
Jan 06 08:34:02 crc kubenswrapper[4784]: E0106 08:34:02.164426 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0e826e9-2dbc-4d49-96c2-e21265a9907a" containerName="ovn-config"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.164567 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0e826e9-2dbc-4d49-96c2-e21265a9907a" containerName="ovn-config"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.164927 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0e826e9-2dbc-4d49-96c2-e21265a9907a" containerName="ovn-config"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.166223 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.168505 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.182887 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"]
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.276383 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8tvjg-config-vzv5b"]
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.285347 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8tvjg-config-vzv5b"]
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.334204 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.334275 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sc6b\" (UniqueName: \"kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.334435 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.334528 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.335402 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.335479 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.344730 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0e826e9-2dbc-4d49-96c2-e21265a9907a" path="/var/lib/kubelet/pods/e0e826e9-2dbc-4d49-96c2-e21265a9907a/volumes"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.437685 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438220 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438309 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438343 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sc6b\" (UniqueName: \"kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438383 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438411 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.438967 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.439142 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.439792 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.440129 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.440568 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.462460 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sc6b\" (UniqueName: \"kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b\") pod \"dnsmasq-dns-75bdffd66f-w6jbq\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.483664 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.645887 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.979396 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"]
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.992428 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-8ff6v"]
Jan 06 08:34:02 crc kubenswrapper[4784]: I0106 08:34:02.993625 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:02 crc kubenswrapper[4784]: W0106 08:34:02.994000 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a90527d_4de4_42bf_9661_f4fe0bd16579.slice/crio-9371bc3a7dd6e25a85e28117bf50e8dc8c8580b5c85b127d3a055ae8f411de38 WatchSource:0}: Error finding container 9371bc3a7dd6e25a85e28117bf50e8dc8c8580b5c85b127d3a055ae8f411de38: Status 404 returned error can't find the container with id 9371bc3a7dd6e25a85e28117bf50e8dc8c8580b5c85b127d3a055ae8f411de38
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.003687 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.025314 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8ff6v"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.161283 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.161699 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxzxn\" (UniqueName: \"kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.167155 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-7tst7"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.168575 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.182896 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7tst7"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.248042 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d356-account-create-update-wh749"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.249407 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.259106 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d356-account-create-update-wh749"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264050 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264161 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264196 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg4t6\" (UniqueName: \"kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264222 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxzxn\" (UniqueName: \"kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264243 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.264270 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58xmg\" (UniqueName: \"kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.265221 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.270827 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.295215 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxzxn\" (UniqueName: \"kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn\") pod \"cinder-db-create-8ff6v\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.359944 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8ff6v"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.365895 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.365991 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg4t6\" (UniqueName: \"kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.366026 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.366044 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58xmg\" (UniqueName: \"kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.367143 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.368115 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.397558 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c78a-account-create-update-97xxc"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.398730 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58xmg\" (UniqueName: \"kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg\") pod \"barbican-d356-account-create-update-wh749\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.402248 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg4t6\" (UniqueName: \"kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6\") pod \"barbican-db-create-7tst7\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.426181 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c78a-account-create-update-97xxc"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.426360 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.430956 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.467288 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t72r5\" (UniqueName: \"kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.467735 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.491560 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-gp9r7"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.492994 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.496999 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.497086 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.497130 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.497964 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lgkm4"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.501820 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-4b287"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.503265 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.518180 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gp9r7"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.529722 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-4b287"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.558530 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7tst7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570083 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570152 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570175 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx997\" (UniqueName: \"kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570199 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570263 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570336 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj4wv\" (UniqueName: \"kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.570357 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t72r5\" (UniqueName: \"kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.571646 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.596135 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t72r5\" (UniqueName: \"kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5\") pod \"cinder-c78a-account-create-update-97xxc\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.619506 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c30b-account-create-update-lssn9"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.621106 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.624990 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.634562 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c30b-account-create-update-lssn9"]
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.654187 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-wh749"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.672228 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj4wv\" (UniqueName: \"kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675069 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675125 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx997\" (UniqueName: \"kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675158 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675200 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qflmr\" (UniqueName: \"kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675343 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.675382 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.678847 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.681621 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.685388 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.701057 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj4wv\" (UniqueName: \"kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv\") pod \"neutron-db-create-4b287\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.715352 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx997\" (UniqueName: \"kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997\") pod \"keystone-db-sync-gp9r7\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.777138 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qflmr\" (UniqueName: \"kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.777629 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.778448 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.811591 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qflmr\" (UniqueName: \"kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr\") pod \"neutron-c30b-account-create-update-lssn9\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " pod="openstack/neutron-c30b-account-create-update-lssn9"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.819837 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-97xxc"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.836399 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gp9r7"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.863566 4784 generic.go:334] "Generic (PLEG): container finished" podID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerID="e8d0d25462ae532a0960554c07ee1eaae3e113a0e4ad88137d918acd3b67d4fa" exitCode=0
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.863623 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" event={"ID":"8a90527d-4de4-42bf-9661-f4fe0bd16579","Type":"ContainerDied","Data":"e8d0d25462ae532a0960554c07ee1eaae3e113a0e4ad88137d918acd3b67d4fa"}
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.863662 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" event={"ID":"8a90527d-4de4-42bf-9661-f4fe0bd16579","Type":"ContainerStarted","Data":"9371bc3a7dd6e25a85e28117bf50e8dc8c8580b5c85b127d3a055ae8f411de38"}
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.917947 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-4b287"
Jan 06 08:34:03 crc kubenswrapper[4784]: I0106 08:34:03.966661 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c30b-account-create-update-lssn9" Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.035664 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8ff6v"] Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.119638 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d356-account-create-update-wh749"] Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.163628 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7tst7"] Jan 06 08:34:04 crc kubenswrapper[4784]: W0106 08:34:04.171973 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee0707c4_0382_44eb_b6e9_0a2c076862ae.slice/crio-f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619 WatchSource:0}: Error finding container f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619: Status 404 returned error can't find the container with id f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619 Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.417806 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c78a-account-create-update-97xxc"] Jan 06 08:34:04 crc kubenswrapper[4784]: W0106 08:34:04.438020 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod441e6fe5_5597_44c2_883e_7279b94a858c.slice/crio-b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b WatchSource:0}: Error finding container b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b: Status 404 returned error can't find the container with id b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.528099 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c30b-account-create-update-lssn9"] Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.535075 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gp9r7"] Jan 06 08:34:04 crc kubenswrapper[4784]: W0106 08:34:04.551076 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93749da5_57fd_4761_aa84_95c9cec12e52.slice/crio-689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004 WatchSource:0}: Error finding container 689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004: Status 404 returned error can't find the container with id 689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004 Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.622540 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-4b287"] Jan 06 08:34:04 crc kubenswrapper[4784]: W0106 08:34:04.633919 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9ede4c2_499b_4c2c_8aa5_11344298ebae.slice/crio-f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368 WatchSource:0}: Error finding container f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368: Status 404 returned error can't find the container with id f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368 Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.873791 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-c30b-account-create-update-lssn9" event={"ID":"93749da5-57fd-4761-aa84-95c9cec12e52","Type":"ContainerStarted","Data":"03dc15805f46528c7cb3d4d2d54a65a050d2a7e05316bc87de7d034099857e2d"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.873850 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c30b-account-create-update-lssn9" event={"ID":"93749da5-57fd-4761-aa84-95c9cec12e52","Type":"ContainerStarted","Data":"689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.877820 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" event={"ID":"8a90527d-4de4-42bf-9661-f4fe0bd16579","Type":"ContainerStarted","Data":"fa0c569e23b0406cfe8984fe9a3315da9da5b3978a4f85113647cad15c7813b3"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.878007 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.879523 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gp9r7" event={"ID":"83a1fa13-fedb-4baf-bd94-7ce99bbeefab","Type":"ContainerStarted","Data":"7302722c598f6203ceee8b862689e97ec23a1b0536c03ad2faaf189179aa89ae"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.880956 4784 generic.go:334] "Generic (PLEG): container finished" podID="f091be74-b0f9-4291-9202-20e877c55b30" containerID="f266446de084888b268e8cd715a99fdc4b5092a328d89ca889e7c734c4e993cf" exitCode=0 Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.881019 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7tst7" event={"ID":"f091be74-b0f9-4291-9202-20e877c55b30","Type":"ContainerDied","Data":"f266446de084888b268e8cd715a99fdc4b5092a328d89ca889e7c734c4e993cf"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.881047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7tst7" event={"ID":"f091be74-b0f9-4291-9202-20e877c55b30","Type":"ContainerStarted","Data":"05e51e4ee5f625f916a1db7aab4ac54c4e5ad5cf35b5e932da241365b4b22bb0"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.882472 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-97xxc" event={"ID":"441e6fe5-5597-44c2-883e-7279b94a858c","Type":"ContainerStarted","Data":"a57052782aaf0ace5ddc1d07ceff1112413c84255f703c8070bf5c16fd07cf08"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.882554 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-97xxc" event={"ID":"441e6fe5-5597-44c2-883e-7279b94a858c","Type":"ContainerStarted","Data":"b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.885680 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-wh749" event={"ID":"ee0707c4-0382-44eb-b6e9-0a2c076862ae","Type":"ContainerStarted","Data":"8a4a02d92a8a711ca0ebd750090283fa911d0db029a5a0b4b70362192329d69d"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.885735 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-wh749" event={"ID":"ee0707c4-0382-44eb-b6e9-0a2c076862ae","Type":"ContainerStarted","Data":"f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619"} Jan 06 08:34:04 crc 
kubenswrapper[4784]: I0106 08:34:04.888146 4784 generic.go:334] "Generic (PLEG): container finished" podID="45abd702-0d1f-4e81-b043-9cbd8ed1591b" containerID="b9c5949b3a85da1a9f1fb60412152f538ba8d8fc52bb6212a53de6e651a54857" exitCode=0 Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.888199 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8ff6v" event={"ID":"45abd702-0d1f-4e81-b043-9cbd8ed1591b","Type":"ContainerDied","Data":"b9c5949b3a85da1a9f1fb60412152f538ba8d8fc52bb6212a53de6e651a54857"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.888221 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8ff6v" event={"ID":"45abd702-0d1f-4e81-b043-9cbd8ed1591b","Type":"ContainerStarted","Data":"ca9eb05686aabf227eb74e246e4274e8a5e72d0f67fbccde57589600177e352b"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.891870 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4b287" event={"ID":"e9ede4c2-499b-4c2c-8aa5-11344298ebae","Type":"ContainerStarted","Data":"90f90ea921b8da7940600a58d3af16d3747f97f35a9b4046001ef979cb52ee03"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.891945 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4b287" event={"ID":"e9ede4c2-499b-4c2c-8aa5-11344298ebae","Type":"ContainerStarted","Data":"f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368"} Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.905400 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-c30b-account-create-update-lssn9" podStartSLOduration=1.905371862 podStartE2EDuration="1.905371862s" podCreationTimestamp="2026-01-06 08:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:04.895313998 +0000 UTC m=+1146.941486835" watchObservedRunningTime="2026-01-06 08:34:04.905371862 +0000 UTC m=+1146.951544709" Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.977805 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" podStartSLOduration=2.97778126 podStartE2EDuration="2.97778126s" podCreationTimestamp="2026-01-06 08:34:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:04.951346025 +0000 UTC m=+1146.997518852" watchObservedRunningTime="2026-01-06 08:34:04.97778126 +0000 UTC m=+1147.023954097" Jan 06 08:34:04 crc kubenswrapper[4784]: I0106 08:34:04.979001 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-4b287" podStartSLOduration=1.9789941070000001 podStartE2EDuration="1.978994107s" podCreationTimestamp="2026-01-06 08:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:04.970568084 +0000 UTC m=+1147.016740921" watchObservedRunningTime="2026-01-06 08:34:04.978994107 +0000 UTC m=+1147.025166944" Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.044636 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-c78a-account-create-update-97xxc" podStartSLOduration=2.044610613 podStartE2EDuration="2.044610613s" podCreationTimestamp="2026-01-06 08:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:05.039971639 +0000 UTC m=+1147.086144476" watchObservedRunningTime="2026-01-06 08:34:05.044610613 +0000 UTC m=+1147.090783450" Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.058194 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d356-account-create-update-wh749" podStartSLOduration=2.058171396 podStartE2EDuration="2.058171396s" podCreationTimestamp="2026-01-06 08:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:05.056812234 +0000 UTC m=+1147.102985071" watchObservedRunningTime="2026-01-06 08:34:05.058171396 +0000 UTC m=+1147.104344233" Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.906640 4784 generic.go:334] "Generic (PLEG): container finished" podID="93749da5-57fd-4761-aa84-95c9cec12e52" containerID="03dc15805f46528c7cb3d4d2d54a65a050d2a7e05316bc87de7d034099857e2d" exitCode=0 Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.906982 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c30b-account-create-update-lssn9" event={"ID":"93749da5-57fd-4761-aa84-95c9cec12e52","Type":"ContainerDied","Data":"03dc15805f46528c7cb3d4d2d54a65a050d2a7e05316bc87de7d034099857e2d"} Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.909360 4784 generic.go:334] "Generic (PLEG): container finished" podID="441e6fe5-5597-44c2-883e-7279b94a858c" containerID="a57052782aaf0ace5ddc1d07ceff1112413c84255f703c8070bf5c16fd07cf08" exitCode=0 Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.909412 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-97xxc" event={"ID":"441e6fe5-5597-44c2-883e-7279b94a858c","Type":"ContainerDied","Data":"a57052782aaf0ace5ddc1d07ceff1112413c84255f703c8070bf5c16fd07cf08"} Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.915243 4784 generic.go:334] "Generic (PLEG): container finished" podID="ee0707c4-0382-44eb-b6e9-0a2c076862ae" containerID="8a4a02d92a8a711ca0ebd750090283fa911d0db029a5a0b4b70362192329d69d" exitCode=0 Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.915389 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-wh749" event={"ID":"ee0707c4-0382-44eb-b6e9-0a2c076862ae","Type":"ContainerDied","Data":"8a4a02d92a8a711ca0ebd750090283fa911d0db029a5a0b4b70362192329d69d"} Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.921057 4784 generic.go:334] "Generic (PLEG): container finished" podID="e9ede4c2-499b-4c2c-8aa5-11344298ebae" containerID="90f90ea921b8da7940600a58d3af16d3747f97f35a9b4046001ef979cb52ee03" exitCode=0 Jan 06 08:34:05 crc kubenswrapper[4784]: I0106 08:34:05.921141 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4b287" event={"ID":"e9ede4c2-499b-4c2c-8aa5-11344298ebae","Type":"ContainerDied","Data":"90f90ea921b8da7940600a58d3af16d3747f97f35a9b4046001ef979cb52ee03"} Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.432093 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7tst7" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.447389 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-8ff6v" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.474352 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts\") pod \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.474748 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxzxn\" (UniqueName: \"kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn\") pod \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\" (UID: \"45abd702-0d1f-4e81-b043-9cbd8ed1591b\") " Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.474928 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg4t6\" (UniqueName: \"kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6\") pod \"f091be74-b0f9-4291-9202-20e877c55b30\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.475063 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts\") pod \"f091be74-b0f9-4291-9202-20e877c55b30\" (UID: \"f091be74-b0f9-4291-9202-20e877c55b30\") " Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.476351 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f091be74-b0f9-4291-9202-20e877c55b30" (UID: "f091be74-b0f9-4291-9202-20e877c55b30"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.476878 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45abd702-0d1f-4e81-b043-9cbd8ed1591b" (UID: "45abd702-0d1f-4e81-b043-9cbd8ed1591b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.513793 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6" (OuterVolumeSpecName: "kube-api-access-kg4t6") pod "f091be74-b0f9-4291-9202-20e877c55b30" (UID: "f091be74-b0f9-4291-9202-20e877c55b30"). InnerVolumeSpecName "kube-api-access-kg4t6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.516282 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn" (OuterVolumeSpecName: "kube-api-access-fxzxn") pod "45abd702-0d1f-4e81-b043-9cbd8ed1591b" (UID: "45abd702-0d1f-4e81-b043-9cbd8ed1591b"). InnerVolumeSpecName "kube-api-access-fxzxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.582349 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abd702-0d1f-4e81-b043-9cbd8ed1591b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.582398 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxzxn\" (UniqueName: \"kubernetes.io/projected/45abd702-0d1f-4e81-b043-9cbd8ed1591b-kube-api-access-fxzxn\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.582420 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg4t6\" (UniqueName: \"kubernetes.io/projected/f091be74-b0f9-4291-9202-20e877c55b30-kube-api-access-kg4t6\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.582433 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f091be74-b0f9-4291-9202-20e877c55b30-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.936324 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8ff6v" event={"ID":"45abd702-0d1f-4e81-b043-9cbd8ed1591b","Type":"ContainerDied","Data":"ca9eb05686aabf227eb74e246e4274e8a5e72d0f67fbccde57589600177e352b"} Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.936365 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8ff6v" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.936401 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca9eb05686aabf227eb74e246e4274e8a5e72d0f67fbccde57589600177e352b" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.939146 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7tst7" Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.939779 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7tst7" event={"ID":"f091be74-b0f9-4291-9202-20e877c55b30","Type":"ContainerDied","Data":"05e51e4ee5f625f916a1db7aab4ac54c4e5ad5cf35b5e932da241365b4b22bb0"} Jan 06 08:34:06 crc kubenswrapper[4784]: I0106 08:34:06.939846 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05e51e4ee5f625f916a1db7aab4ac54c4e5ad5cf35b5e932da241365b4b22bb0" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.317525 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-4b287" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.396247 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj4wv\" (UniqueName: \"kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv\") pod \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.396330 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts\") pod \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\" (UID: \"e9ede4c2-499b-4c2c-8aa5-11344298ebae\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.398473 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e9ede4c2-499b-4c2c-8aa5-11344298ebae" (UID: "e9ede4c2-499b-4c2c-8aa5-11344298ebae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.447911 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv" (OuterVolumeSpecName: "kube-api-access-tj4wv") pod "e9ede4c2-499b-4c2c-8aa5-11344298ebae" (UID: "e9ede4c2-499b-4c2c-8aa5-11344298ebae"). InnerVolumeSpecName "kube-api-access-tj4wv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.498320 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj4wv\" (UniqueName: \"kubernetes.io/projected/e9ede4c2-499b-4c2c-8aa5-11344298ebae-kube-api-access-tj4wv\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.498359 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9ede4c2-499b-4c2c-8aa5-11344298ebae-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.555782 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-97xxc" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.557221 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-wh749" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.561743 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c30b-account-create-update-lssn9" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.599992 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts\") pod \"93749da5-57fd-4761-aa84-95c9cec12e52\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600064 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qflmr\" (UniqueName: \"kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr\") pod \"93749da5-57fd-4761-aa84-95c9cec12e52\" (UID: \"93749da5-57fd-4761-aa84-95c9cec12e52\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600143 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts\") pod \"441e6fe5-5597-44c2-883e-7279b94a858c\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600267 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58xmg\" (UniqueName: \"kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg\") pod \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600818 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "441e6fe5-5597-44c2-883e-7279b94a858c" (UID: "441e6fe5-5597-44c2-883e-7279b94a858c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600831 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "93749da5-57fd-4761-aa84-95c9cec12e52" (UID: "93749da5-57fd-4761-aa84-95c9cec12e52"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600419 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts\") pod \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\" (UID: \"ee0707c4-0382-44eb-b6e9-0a2c076862ae\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.600957 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t72r5\" (UniqueName: \"kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5\") pod \"441e6fe5-5597-44c2-883e-7279b94a858c\" (UID: \"441e6fe5-5597-44c2-883e-7279b94a858c\") " Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.601441 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee0707c4-0382-44eb-b6e9-0a2c076862ae" (UID: "ee0707c4-0382-44eb-b6e9-0a2c076862ae"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.601933 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441e6fe5-5597-44c2-883e-7279b94a858c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.601950 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee0707c4-0382-44eb-b6e9-0a2c076862ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.601961 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93749da5-57fd-4761-aa84-95c9cec12e52-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.604824 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr" (OuterVolumeSpecName: "kube-api-access-qflmr") pod "93749da5-57fd-4761-aa84-95c9cec12e52" (UID: "93749da5-57fd-4761-aa84-95c9cec12e52"). InnerVolumeSpecName "kube-api-access-qflmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.605444 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5" (OuterVolumeSpecName: "kube-api-access-t72r5") pod "441e6fe5-5597-44c2-883e-7279b94a858c" (UID: "441e6fe5-5597-44c2-883e-7279b94a858c"). InnerVolumeSpecName "kube-api-access-t72r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.627332 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg" (OuterVolumeSpecName: "kube-api-access-58xmg") pod "ee0707c4-0382-44eb-b6e9-0a2c076862ae" (UID: "ee0707c4-0382-44eb-b6e9-0a2c076862ae"). InnerVolumeSpecName "kube-api-access-58xmg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.703895 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58xmg\" (UniqueName: \"kubernetes.io/projected/ee0707c4-0382-44eb-b6e9-0a2c076862ae-kube-api-access-58xmg\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.703945 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t72r5\" (UniqueName: \"kubernetes.io/projected/441e6fe5-5597-44c2-883e-7279b94a858c-kube-api-access-t72r5\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.703960 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qflmr\" (UniqueName: \"kubernetes.io/projected/93749da5-57fd-4761-aa84-95c9cec12e52-kube-api-access-qflmr\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.960822 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4b287" event={"ID":"e9ede4c2-499b-4c2c-8aa5-11344298ebae","Type":"ContainerDied","Data":"f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368"} Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.960883 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8359bf532f3781562dc6a86c01798a3c19ae826905b5d3ee87437a7afc51368" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.960852 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-4b287" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.963465 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c30b-account-create-update-lssn9" event={"ID":"93749da5-57fd-4761-aa84-95c9cec12e52","Type":"ContainerDied","Data":"689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004"} Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.963512 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="689935dc4cf82e155538d23988c02c35cab0d15fc1279d93b2ea968ec6098004" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.963617 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c30b-account-create-update-lssn9" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.970020 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-97xxc" event={"ID":"441e6fe5-5597-44c2-883e-7279b94a858c","Type":"ContainerDied","Data":"b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b"} Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.970078 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b14ae9919b81d06fe34f9eadf35c72d58ed64f09293fb59f83dc4ea315bd601b" Jan 06 08:34:07 crc kubenswrapper[4784]: I0106 08:34:07.970078 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c78a-account-create-update-97xxc" Jan 06 08:34:08 crc kubenswrapper[4784]: I0106 08:34:07.973401 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-wh749" event={"ID":"ee0707c4-0382-44eb-b6e9-0a2c076862ae","Type":"ContainerDied","Data":"f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619"} Jan 06 08:34:08 crc kubenswrapper[4784]: I0106 08:34:07.973434 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9cfe2af78ffbaca729cf82272c50a9b5e5750c878e557832e818710a5f43619" Jan 06 08:34:08 crc kubenswrapper[4784]: I0106 08:34:07.973506 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-wh749" Jan 06 08:34:12 crc kubenswrapper[4784]: I0106 08:34:12.485794 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" Jan 06 08:34:12 crc kubenswrapper[4784]: I0106 08:34:12.588819 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:34:12 crc kubenswrapper[4784]: I0106 08:34:12.589326 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" containerName="dnsmasq-dns" containerID="cri-o://7aa2022b04e1d7e859677179f803d6836738f66967d19b38d26815b72e459e21" gracePeriod=10 Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.030365 4784 generic.go:334] "Generic (PLEG): container finished" podID="90bf95cf-3c80-48fe-874d-5525c724a219" containerID="7aa2022b04e1d7e859677179f803d6836738f66967d19b38d26815b72e459e21" exitCode=0 Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.030440 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" event={"ID":"90bf95cf-3c80-48fe-874d-5525c724a219","Type":"ContainerDied","Data":"7aa2022b04e1d7e859677179f803d6836738f66967d19b38d26815b72e459e21"} Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.030471 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" event={"ID":"90bf95cf-3c80-48fe-874d-5525c724a219","Type":"ContainerDied","Data":"c52670684e06f7ba11b3b831b72ecc3bc11c1d431f1a842b162a17652ac98789"} Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.030496 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c52670684e06f7ba11b3b831b72ecc3bc11c1d431f1a842b162a17652ac98789" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.031820 4784 generic.go:334] "Generic (PLEG): container finished" podID="81b4221b-0db4-4391-a152-951dbe6700cb" containerID="9ec8b34270c8cb61ca7735cf44513c82b7b7b7a7970712874d5239863d652840" exitCode=0 Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.031916 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vwmz9" event={"ID":"81b4221b-0db4-4391-a152-951dbe6700cb","Type":"ContainerDied","Data":"9ec8b34270c8cb61ca7735cf44513c82b7b7b7a7970712874d5239863d652840"} Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.032954 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gp9r7" event={"ID":"83a1fa13-fedb-4baf-bd94-7ce99bbeefab","Type":"ContainerStarted","Data":"6aed5d5bdb3e840d82bc70bce9e03cc3f9557785201d5d0f6120d8c4bbbe42b2"} Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 
08:34:13.048855 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.095943 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-gp9r7" podStartSLOduration=2.536684712 podStartE2EDuration="10.095923067s" podCreationTimestamp="2026-01-06 08:34:03 +0000 UTC" firstStartedPulling="2026-01-06 08:34:04.548565589 +0000 UTC m=+1146.594738426" lastFinishedPulling="2026-01-06 08:34:12.107803934 +0000 UTC m=+1154.153976781" observedRunningTime="2026-01-06 08:34:13.092397807 +0000 UTC m=+1155.138570644" watchObservedRunningTime="2026-01-06 08:34:13.095923067 +0000 UTC m=+1155.142095904" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.246737 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb\") pod \"90bf95cf-3c80-48fe-874d-5525c724a219\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.246884 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config\") pod \"90bf95cf-3c80-48fe-874d-5525c724a219\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.246949 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csfw5\" (UniqueName: \"kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5\") pod \"90bf95cf-3c80-48fe-874d-5525c724a219\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.246982 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb\") pod \"90bf95cf-3c80-48fe-874d-5525c724a219\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.247093 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc\") pod \"90bf95cf-3c80-48fe-874d-5525c724a219\" (UID: \"90bf95cf-3c80-48fe-874d-5525c724a219\") " Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.253820 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5" (OuterVolumeSpecName: "kube-api-access-csfw5") pod "90bf95cf-3c80-48fe-874d-5525c724a219" (UID: "90bf95cf-3c80-48fe-874d-5525c724a219"). InnerVolumeSpecName "kube-api-access-csfw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.291970 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config" (OuterVolumeSpecName: "config") pod "90bf95cf-3c80-48fe-874d-5525c724a219" (UID: "90bf95cf-3c80-48fe-874d-5525c724a219"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.292070 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "90bf95cf-3c80-48fe-874d-5525c724a219" (UID: "90bf95cf-3c80-48fe-874d-5525c724a219"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.294683 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "90bf95cf-3c80-48fe-874d-5525c724a219" (UID: "90bf95cf-3c80-48fe-874d-5525c724a219"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.313395 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "90bf95cf-3c80-48fe-874d-5525c724a219" (UID: "90bf95cf-3c80-48fe-874d-5525c724a219"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.350113 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.350305 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.350363 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.350414 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csfw5\" (UniqueName: \"kubernetes.io/projected/90bf95cf-3c80-48fe-874d-5525c724a219-kube-api-access-csfw5\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:13 crc kubenswrapper[4784]: I0106 08:34:13.350493 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/90bf95cf-3c80-48fe-874d-5525c724a219-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.046606 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-5fw7r" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.119640 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.129063 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-5fw7r"] Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.327104 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" path="/var/lib/kubelet/pods/90bf95cf-3c80-48fe-874d-5525c724a219/volumes" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.350813 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.350925 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.581218 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vwmz9" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.683523 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data\") pod \"81b4221b-0db4-4391-a152-951dbe6700cb\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.683624 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data\") pod \"81b4221b-0db4-4391-a152-951dbe6700cb\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.683827 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle\") pod \"81b4221b-0db4-4391-a152-951dbe6700cb\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.684003 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sljff\" (UniqueName: \"kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff\") pod \"81b4221b-0db4-4391-a152-951dbe6700cb\" (UID: \"81b4221b-0db4-4391-a152-951dbe6700cb\") " Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.696802 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "81b4221b-0db4-4391-a152-951dbe6700cb" (UID: "81b4221b-0db4-4391-a152-951dbe6700cb"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.721837 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff" (OuterVolumeSpecName: "kube-api-access-sljff") pod "81b4221b-0db4-4391-a152-951dbe6700cb" (UID: "81b4221b-0db4-4391-a152-951dbe6700cb"). InnerVolumeSpecName "kube-api-access-sljff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.770759 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81b4221b-0db4-4391-a152-951dbe6700cb" (UID: "81b4221b-0db4-4391-a152-951dbe6700cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.790159 4784 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.790247 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.790263 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sljff\" (UniqueName: \"kubernetes.io/projected/81b4221b-0db4-4391-a152-951dbe6700cb-kube-api-access-sljff\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.840735 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data" (OuterVolumeSpecName: "config-data") pod "81b4221b-0db4-4391-a152-951dbe6700cb" (UID: "81b4221b-0db4-4391-a152-951dbe6700cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:14 crc kubenswrapper[4784]: I0106 08:34:14.891917 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81b4221b-0db4-4391-a152-951dbe6700cb-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.056224 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vwmz9" event={"ID":"81b4221b-0db4-4391-a152-951dbe6700cb","Type":"ContainerDied","Data":"b26f9e7256d8c19045f06c665ba462a488073f7d2ed9bf32c9fe4bd85b3ac705"} Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.056275 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b26f9e7256d8c19045f06c665ba462a488073f7d2ed9bf32c9fe4bd85b3ac705" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.056344 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-vwmz9" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.547710 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548622 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0707c4-0382-44eb-b6e9-0a2c076862ae" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548639 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0707c4-0382-44eb-b6e9-0a2c076862ae" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548651 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93749da5-57fd-4761-aa84-95c9cec12e52" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548658 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="93749da5-57fd-4761-aa84-95c9cec12e52" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548666 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9ede4c2-499b-4c2c-8aa5-11344298ebae" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548673 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9ede4c2-499b-4c2c-8aa5-11344298ebae" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548682 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81b4221b-0db4-4391-a152-951dbe6700cb" containerName="glance-db-sync" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548688 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="81b4221b-0db4-4391-a152-951dbe6700cb" containerName="glance-db-sync" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548704 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45abd702-0d1f-4e81-b043-9cbd8ed1591b" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548712 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="45abd702-0d1f-4e81-b043-9cbd8ed1591b" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548725 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="441e6fe5-5597-44c2-883e-7279b94a858c" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548731 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="441e6fe5-5597-44c2-883e-7279b94a858c" containerName="mariadb-account-create-update" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548750 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f091be74-b0f9-4291-9202-20e877c55b30" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548757 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f091be74-b0f9-4291-9202-20e877c55b30" containerName="mariadb-database-create" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 08:34:15.548773 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" containerName="init" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548780 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" containerName="init" Jan 06 08:34:15 crc kubenswrapper[4784]: E0106 
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548796 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" containerName="dnsmasq-dns"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548943 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="441e6fe5-5597-44c2-883e-7279b94a858c" containerName="mariadb-account-create-update"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548956 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="93749da5-57fd-4761-aa84-95c9cec12e52" containerName="mariadb-account-create-update"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548970 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9ede4c2-499b-4c2c-8aa5-11344298ebae" containerName="mariadb-database-create"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548979 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="90bf95cf-3c80-48fe-874d-5525c724a219" containerName="dnsmasq-dns"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.548990 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f091be74-b0f9-4291-9202-20e877c55b30" containerName="mariadb-database-create"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.549000 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="81b4221b-0db4-4391-a152-951dbe6700cb" containerName="glance-db-sync"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.549010 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee0707c4-0382-44eb-b6e9-0a2c076862ae" containerName="mariadb-account-create-update"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.549021 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="45abd702-0d1f-4e81-b043-9cbd8ed1591b" containerName="mariadb-database-create"
Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.549911 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dbc684849-9kz74"
Need to start a new one" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.592826 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712194 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xffj7\" (UniqueName: \"kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712336 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712413 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712442 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712495 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.712584 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814305 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814412 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814447 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814496 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814520 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xffj7\" (UniqueName: \"kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.814662 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.815529 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.815724 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.815725 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.816103 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.816271 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.834928 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xffj7\" (UniqueName: 
\"kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7\") pod \"dnsmasq-dns-6dbc684849-9kz74\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:15 crc kubenswrapper[4784]: I0106 08:34:15.908959 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:16 crc kubenswrapper[4784]: I0106 08:34:16.071609 4784 generic.go:334] "Generic (PLEG): container finished" podID="83a1fa13-fedb-4baf-bd94-7ce99bbeefab" containerID="6aed5d5bdb3e840d82bc70bce9e03cc3f9557785201d5d0f6120d8c4bbbe42b2" exitCode=0 Jan 06 08:34:16 crc kubenswrapper[4784]: I0106 08:34:16.071668 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gp9r7" event={"ID":"83a1fa13-fedb-4baf-bd94-7ce99bbeefab","Type":"ContainerDied","Data":"6aed5d5bdb3e840d82bc70bce9e03cc3f9557785201d5d0f6120d8c4bbbe42b2"} Jan 06 08:34:16 crc kubenswrapper[4784]: I0106 08:34:16.554940 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:16 crc kubenswrapper[4784]: W0106 08:34:16.559959 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb957b8c5_25c3_453a_bd68_bab5fd0007b3.slice/crio-09ee329dce3cff178cf571f3387aa604e3309e6b900cc81711386ba65b87724f WatchSource:0}: Error finding container 09ee329dce3cff178cf571f3387aa604e3309e6b900cc81711386ba65b87724f: Status 404 returned error can't find the container with id 09ee329dce3cff178cf571f3387aa604e3309e6b900cc81711386ba65b87724f Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.081847 4784 generic.go:334] "Generic (PLEG): container finished" podID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerID="10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c" exitCode=0 Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.081971 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" event={"ID":"b957b8c5-25c3-453a-bd68-bab5fd0007b3","Type":"ContainerDied","Data":"10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c"} Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.082686 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" event={"ID":"b957b8c5-25c3-453a-bd68-bab5fd0007b3","Type":"ContainerStarted","Data":"09ee329dce3cff178cf571f3387aa604e3309e6b900cc81711386ba65b87724f"} Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.396292 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-gp9r7" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.458092 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle\") pod \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.458642 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data\") pod \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.458739 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx997\" (UniqueName: \"kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997\") pod \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\" (UID: \"83a1fa13-fedb-4baf-bd94-7ce99bbeefab\") " Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.465575 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997" (OuterVolumeSpecName: "kube-api-access-fx997") pod "83a1fa13-fedb-4baf-bd94-7ce99bbeefab" (UID: "83a1fa13-fedb-4baf-bd94-7ce99bbeefab"). InnerVolumeSpecName "kube-api-access-fx997". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.486858 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83a1fa13-fedb-4baf-bd94-7ce99bbeefab" (UID: "83a1fa13-fedb-4baf-bd94-7ce99bbeefab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.506389 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data" (OuterVolumeSpecName: "config-data") pod "83a1fa13-fedb-4baf-bd94-7ce99bbeefab" (UID: "83a1fa13-fedb-4baf-bd94-7ce99bbeefab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.560436 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.560493 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:17 crc kubenswrapper[4784]: I0106 08:34:17.560507 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx997\" (UniqueName: \"kubernetes.io/projected/83a1fa13-fedb-4baf-bd94-7ce99bbeefab-kube-api-access-fx997\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.093673 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gp9r7" event={"ID":"83a1fa13-fedb-4baf-bd94-7ce99bbeefab","Type":"ContainerDied","Data":"7302722c598f6203ceee8b862689e97ec23a1b0536c03ad2faaf189179aa89ae"} Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.093721 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7302722c598f6203ceee8b862689e97ec23a1b0536c03ad2faaf189179aa89ae" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.093759 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gp9r7" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.096453 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" event={"ID":"b957b8c5-25c3-453a-bd68-bab5fd0007b3","Type":"ContainerStarted","Data":"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5"} Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.096739 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.123070 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" podStartSLOduration=3.123048588 podStartE2EDuration="3.123048588s" podCreationTimestamp="2026-01-06 08:34:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:18.120353014 +0000 UTC m=+1160.166525901" watchObservedRunningTime="2026-01-06 08:34:18.123048588 +0000 UTC m=+1160.169221425" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.411174 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-m2rvw"] Jan 06 08:34:18 crc kubenswrapper[4784]: E0106 08:34:18.412093 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83a1fa13-fedb-4baf-bd94-7ce99bbeefab" containerName="keystone-db-sync" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.412114 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="83a1fa13-fedb-4baf-bd94-7ce99bbeefab" containerName="keystone-db-sync" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.412271 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="83a1fa13-fedb-4baf-bd94-7ce99bbeefab" containerName="keystone-db-sync" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.412942 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.419587 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.421941 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.422088 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lgkm4" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.422219 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.422320 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.440801 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.464754 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m2rvw"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.503619 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.505492 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.515800 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.555144 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-j4gnj"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.564286 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.568971 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.569151 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.569262 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-88lbj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581382 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581468 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-config-data\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581488 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581559 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.581580 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgx57\" (UniqueName: \"kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.642777 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-57tfg"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.644263 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.650573 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-vtlzw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.650954 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.651080 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.672943 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.675134 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.677150 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.677377 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683619 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683689 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683718 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683734 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scm79\" (UniqueName: \"kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683772 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683799 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") 
" pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683822 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlj2j\" (UniqueName: \"kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683846 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683901 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-config-data\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683919 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683938 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683956 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.683986 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.684018 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.684040 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgx57\" (UniqueName: \"kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:18 
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.710711 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.713106 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.714326 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.714449 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-57tfg"]
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.715389 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.726196 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgx57\" (UniqueName: \"kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57\") pod \"keystone-bootstrap-m2rvw\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.726278 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-j4gnj"]
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.731242 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m2rvw"
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.755403 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.768033 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-7qsrr"]
Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.769397 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7qsrr"
Need to start a new one" pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.772095 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.778148 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-mzt7s" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786473 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfbrd\" (UniqueName: \"kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786563 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786600 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786623 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786651 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786671 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786708 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786745 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786770 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-scm79\" (UniqueName: \"kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786788 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786815 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786849 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlj2j\" (UniqueName: \"kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786898 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786919 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786937 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786957 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.786983 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.787002 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmxtv\" (UniqueName: \"kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.787022 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.787040 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.787057 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.790076 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.791006 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.798342 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.807793 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.807923 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-7qsrr"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.810025 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.820408 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.830133 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.838248 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlj2j\" (UniqueName: \"kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j\") pod \"dnsmasq-dns-6974cb66c7-79vhs\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.860229 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.866445 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scm79\" (UniqueName: \"kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79\") pod \"neutron-db-sync-j4gnj\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.889839 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.889954 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890005 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890030 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890055 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890078 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9nfl\" (UniqueName: \"kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890115 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890137 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmxtv\" (UniqueName: \"kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890164 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890184 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890206 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890230 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfbrd\" (UniqueName: \"kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890264 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890316 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data\") pod \"ceilometer-0\" (UID: 
\"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890348 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.890375 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.891122 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.891186 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.903050 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.903388 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.903743 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.904163 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.906308 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.907503 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.907701 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.909443 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.913655 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.916945 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.929525 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.931310 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-9bdd7"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.932787 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.935244 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmxtv\" (UniqueName: \"kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv\") pod \"ceilometer-0\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " pod="openstack/ceilometer-0" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.938101 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.938456 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-2vb5p" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.938616 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.939273 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfbrd\" (UniqueName: \"kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd\") pod \"cinder-db-sync-57tfg\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.944210 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.948048 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.963204 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-9bdd7"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.968598 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-57tfg" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.978053 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"] Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.999099 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9nfl\" (UniqueName: \"kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.999749 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:18 crc kubenswrapper[4784]: I0106 08:34:18.999959 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.008508 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.023045 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9nfl\" (UniqueName: \"kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.023742 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data\") pod \"barbican-db-sync-7qsrr\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101475 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101527 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101571 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101596 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101616 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101661 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wptm4\" (UniqueName: \"kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101689 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101720 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101749 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101839 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tq9r\" (UniqueName: \"kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.101862 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203512 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-wptm4\" (UniqueName: \"kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203598 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203648 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203669 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203766 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tq9r\" (UniqueName: \"kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203830 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203923 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203971 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.203990 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.204006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0\") pod 
\"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.204039 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.205838 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.205856 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.206623 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.207151 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.207758 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.209626 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.211378 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.214245 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.263578 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.265324 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tq9r\" (UniqueName: \"kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r\") pod \"dnsmasq-dns-66567888d7-whnpc\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.265466 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.275250 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.298671 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.308696 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wptm4\" (UniqueName: \"kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4\") pod \"placement-db-sync-9bdd7\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.425199 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m2rvw"] Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.550753 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.552472 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.562588 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.562851 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.568965 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-8kx7t" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.580480 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.595808 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.688498 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.691193 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.698973 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.713656 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.745651 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.745719 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.745764 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltqhj\" (UniqueName: \"kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.745831 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.746007 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.746038 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.746063 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.782993 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-j4gnj"] Jan 06 08:34:19 crc kubenswrapper[4784]: W0106 08:34:19.809921 4784 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f6f606e_fa4d_45d8_95a1_04a052ca0dbf.slice/crio-f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931 WatchSource:0}: Error finding container f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931: Status 404 returned error can't find the container with id f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931 Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.826667 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"] Jan 06 08:34:19 crc kubenswrapper[4784]: W0106 08:34:19.861777 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14cecad5_f676_45e0_958e_2ba1779784d4.slice/crio-bf11826e9d8c83e6e3dde8dec0df7862bc58e12ba2964fe246d3672294e75d54 WatchSource:0}: Error finding container bf11826e9d8c83e6e3dde8dec0df7862bc58e12ba2964fe246d3672294e75d54: Status 404 returned error can't find the container with id bf11826e9d8c83e6e3dde8dec0df7862bc58e12ba2964fe246d3672294e75d54 Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873340 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873410 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873557 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873630 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdmdk\" (UniqueName: \"kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873744 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873783 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873815 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873840 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.873933 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874015 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874109 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874216 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874241 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874308 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltqhj\" (UniqueName: \"kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.874984 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.876629 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.877350 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.882125 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.887593 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.909256 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.911646 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltqhj\" (UniqueName: \"kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.918915 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.967442 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977304 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977407 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977449 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdmdk\" (UniqueName: \"kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977640 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977705 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977753 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.977808 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.978481 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.979140 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc 
kubenswrapper[4784]: I0106 08:34:19.979429 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.982590 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.986603 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:19 crc kubenswrapper[4784]: I0106 08:34:19.987451 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.005192 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdmdk\" (UniqueName: \"kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.007681 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-57tfg"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.023145 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.111073 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.157216 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-57tfg" event={"ID":"a29c392c-7fa2-4a80-b072-92b8201616b8","Type":"ContainerStarted","Data":"b5f497f044510f42868bd80e8da919b87b61f8a7421018d7cbb9c6538fe8df3b"} Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.160698 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" event={"ID":"14cecad5-f676-45e0-958e-2ba1779784d4","Type":"ContainerStarted","Data":"bf11826e9d8c83e6e3dde8dec0df7862bc58e12ba2964fe246d3672294e75d54"} Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.163596 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j4gnj" event={"ID":"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf","Type":"ContainerStarted","Data":"f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931"} Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.166489 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rvw" event={"ID":"5a024bf3-6906-497e-9973-458b271505e1","Type":"ContainerStarted","Data":"4054deb074a112412430df78be1ca870064ab339204c29f58878b697b84964a7"} Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.166682 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rvw" event={"ID":"5a024bf3-6906-497e-9973-458b271505e1","Type":"ContainerStarted","Data":"d74639cd72fa68c9bddabbcb594ed86110e170102e8ecb584a87c634f5db2874"} Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.167029 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="dnsmasq-dns" containerID="cri-o://bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5" gracePeriod=10 Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.192868 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-m2rvw" podStartSLOduration=2.192828309 podStartE2EDuration="2.192828309s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:20.188133083 +0000 UTC m=+1162.234305910" watchObservedRunningTime="2026-01-06 08:34:20.192828309 +0000 UTC m=+1162.239001146" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.400426 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-7qsrr"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.424347 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.431646 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.439701 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-9bdd7"] Jan 06 08:34:20 crc kubenswrapper[4784]: W0106 08:34:20.441323 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf077c5c_dec8_41a6_8677_e5f7681c83e5.slice/crio-61db0b6eed5e929819192ee3637fbe416f0c424c74af61db96a5177f602cc337 WatchSource:0}: Error finding container 
61db0b6eed5e929819192ee3637fbe416f0c424c74af61db96a5177f602cc337: Status 404 returned error can't find the container with id 61db0b6eed5e929819192ee3637fbe416f0c424c74af61db96a5177f602cc337 Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.679584 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.694877 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.797904 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.798622 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.798699 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.798750 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xffj7\" (UniqueName: \"kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.798912 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.798954 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc\") pod \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\" (UID: \"b957b8c5-25c3-453a-bd68-bab5fd0007b3\") " Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.816871 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7" (OuterVolumeSpecName: "kube-api-access-xffj7") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "kube-api-access-xffj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.818149 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.896752 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.902058 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.902081 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xffj7\" (UniqueName: \"kubernetes.io/projected/b957b8c5-25c3-453a-bd68-bab5fd0007b3-kube-api-access-xffj7\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.919325 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.920355 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.945000 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config" (OuterVolumeSpecName: "config") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:20 crc kubenswrapper[4784]: I0106 08:34:20.974074 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b957b8c5-25c3-453a-bd68-bab5fd0007b3" (UID: "b957b8c5-25c3-453a-bd68-bab5fd0007b3"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.016062 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.016109 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.016121 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.016133 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b957b8c5-25c3-453a-bd68-bab5fd0007b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.264892 4784 generic.go:334] "Generic (PLEG): container finished" podID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerID="bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5" exitCode=0 Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.265040 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" event={"ID":"b957b8c5-25c3-453a-bd68-bab5fd0007b3","Type":"ContainerDied","Data":"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.265077 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" event={"ID":"b957b8c5-25c3-453a-bd68-bab5fd0007b3","Type":"ContainerDied","Data":"09ee329dce3cff178cf571f3387aa604e3309e6b900cc81711386ba65b87724f"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.265096 4784 scope.go:117] "RemoveContainer" containerID="bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.265296 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dbc684849-9kz74" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.299918 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerStarted","Data":"a6d71c76595addfe7c8bc0a8b89ab838066730225f0e4945288561cff6b7b5af"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.305751 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7qsrr" event={"ID":"0dd652b3-9755-47c0-a4cc-c39c86d840f3","Type":"ContainerStarted","Data":"156a6e3178af10fecb28c6ced39a17bf907a0448118669fde357da6404a79c93"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.335786 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.348582 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dbc684849-9kz74"] Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.380736 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j4gnj" event={"ID":"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf","Type":"ContainerStarted","Data":"3737d9c9a4f7a004d041d4ac5573b4a1033d115a765b7915b0da0f343e6743af"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.423741 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-j4gnj" podStartSLOduration=3.42371868 podStartE2EDuration="3.42371868s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:21.415871245 +0000 UTC m=+1163.462044082" watchObservedRunningTime="2026-01-06 08:34:21.42371868 +0000 UTC m=+1163.469891517" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.462055 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9bdd7" event={"ID":"7a172939-8d39-40a2-9b41-1eee48de7be5","Type":"ContainerStarted","Data":"9354db582338ecaf727558cd89d7e198f05e68cdf1cfa63df21d666b6b647da8"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.494943 4784 generic.go:334] "Generic (PLEG): container finished" podID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerID="d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7" exitCode=0 Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.495024 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-whnpc" event={"ID":"977e57a8-a139-41d0-b757-2ea7134bd9ce","Type":"ContainerDied","Data":"d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.495058 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-whnpc" event={"ID":"977e57a8-a139-41d0-b757-2ea7134bd9ce","Type":"ContainerStarted","Data":"9e2691a4b8901eb304fc753250d138432e90c6b727497c1c620e2b0f2d603413"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.499461 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerStarted","Data":"61db0b6eed5e929819192ee3637fbe416f0c424c74af61db96a5177f602cc337"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.508612 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerStarted","Data":"e5f81c47df6369e543674aff0ce2581a5d888d3125d90966c6ccde37040e2a85"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.513760 4784 generic.go:334] "Generic (PLEG): container finished" podID="14cecad5-f676-45e0-958e-2ba1779784d4" containerID="cd675eff15322276fef4ca5d46462873f40aac2fd7605efc2565209664babe61" exitCode=0 Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.514058 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" event={"ID":"14cecad5-f676-45e0-958e-2ba1779784d4","Type":"ContainerDied","Data":"cd675eff15322276fef4ca5d46462873f40aac2fd7605efc2565209664babe61"} Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.567005 4784 scope.go:117] "RemoveContainer" containerID="10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.661713 4784 scope.go:117] "RemoveContainer" containerID="bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5" Jan 06 08:34:21 crc kubenswrapper[4784]: E0106 08:34:21.663643 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5\": container with ID starting with bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5 not found: ID does not exist" containerID="bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.663688 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5"} err="failed to get container status \"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5\": rpc error: code = NotFound desc = could not find container \"bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5\": container with ID starting with bc7d01e7af0e1c3d73edc48f606cc9e7304a13afe6aad0eb1d7026563bf8f1d5 not found: ID does not exist" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.663714 4784 scope.go:117] "RemoveContainer" containerID="10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c" Jan 06 08:34:21 crc kubenswrapper[4784]: E0106 08:34:21.664372 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c\": container with ID starting with 10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c not found: ID does not exist" containerID="10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.664424 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c"} err="failed to get container status \"10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c\": rpc error: code = NotFound desc = could not find container \"10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c\": container with ID starting with 10b6519de147bb5eb63af8e7a59677e7aeea199b7942da4cb0385d20bf93c22c not found: ID does not exist" Jan 06 08:34:21 crc kubenswrapper[4784]: I0106 08:34:21.966201 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.049960 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlj2j\" (UniqueName: \"kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.050006 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.050086 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.050126 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.050195 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.050252 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc\") pod \"14cecad5-f676-45e0-958e-2ba1779784d4\" (UID: \"14cecad5-f676-45e0-958e-2ba1779784d4\") " Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.065338 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j" (OuterVolumeSpecName: "kube-api-access-zlj2j") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "kube-api-access-zlj2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.081483 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config" (OuterVolumeSpecName: "config") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.098609 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.111292 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.111474 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.120249 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "14cecad5-f676-45e0-958e-2ba1779784d4" (UID: "14cecad5-f676-45e0-958e-2ba1779784d4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.152413 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlj2j\" (UniqueName: \"kubernetes.io/projected/14cecad5-f676-45e0-958e-2ba1779784d4-kube-api-access-zlj2j\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.153104 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.153117 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.153147 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.153159 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.153168 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14cecad5-f676-45e0-958e-2ba1779784d4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.330247 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" path="/var/lib/kubelet/pods/b957b8c5-25c3-453a-bd68-bab5fd0007b3/volumes" Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.544430 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerStarted","Data":"473a8ebf44fb7a38859d9d2f26168edc9d3b8babd2f658187f07c7dcf42224d5"} Jan 06 
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.559853 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-whnpc" event={"ID":"977e57a8-a139-41d0-b757-2ea7134bd9ce","Type":"ContainerStarted","Data":"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c"}
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.560696 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66567888d7-whnpc"
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.590988 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerStarted","Data":"7e3b28acefa95b6051d4c2e6d724554038d9b3b64635dba321fd49c2bdcf0f01"}
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.602114 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs"
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.602632 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6974cb66c7-79vhs" event={"ID":"14cecad5-f676-45e0-958e-2ba1779784d4","Type":"ContainerDied","Data":"bf11826e9d8c83e6e3dde8dec0df7862bc58e12ba2964fe246d3672294e75d54"}
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.602677 4784 scope.go:117] "RemoveContainer" containerID="cd675eff15322276fef4ca5d46462873f40aac2fd7605efc2565209664babe61"
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.610490 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66567888d7-whnpc" podStartSLOduration=4.610463195 podStartE2EDuration="4.610463195s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:22.591376489 +0000 UTC m=+1164.637549326" watchObservedRunningTime="2026-01-06 08:34:22.610463195 +0000 UTC m=+1164.656636032"
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.653447 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.696760 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"]
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.726277 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6974cb66c7-79vhs"]
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.765854 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:34:22 crc kubenswrapper[4784]: I0106 08:34:22.805960 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.618459 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerStarted","Data":"4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa"}
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.619079 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-log" containerID="cri-o://473a8ebf44fb7a38859d9d2f26168edc9d3b8babd2f658187f07c7dcf42224d5" gracePeriod=30
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.619688 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-httpd" containerID="cri-o://4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa" gracePeriod=30
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.623666 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerStarted","Data":"b892bda747d1844e036df29f611eef0a6b2e2ba665ca456c4be278bab9a12eb9"}
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.623821 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-log" containerID="cri-o://7e3b28acefa95b6051d4c2e6d724554038d9b3b64635dba321fd49c2bdcf0f01" gracePeriod=30
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.623914 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-httpd" containerID="cri-o://b892bda747d1844e036df29f611eef0a6b2e2ba665ca456c4be278bab9a12eb9" gracePeriod=30
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.694083 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.6940575429999996 podStartE2EDuration="5.694057543s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:23.646521571 +0000 UTC m=+1165.692694408" watchObservedRunningTime="2026-01-06 08:34:23.694057543 +0000 UTC m=+1165.740230380"
Jan 06 08:34:23 crc kubenswrapper[4784]: I0106 08:34:23.699291 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.699272097 podStartE2EDuration="5.699272097s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:23.674913447 +0000 UTC m=+1165.721086274" watchObservedRunningTime="2026-01-06 08:34:23.699272097 +0000 UTC m=+1165.745444924"
Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.322435 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14cecad5-f676-45e0-958e-2ba1779784d4" path="/var/lib/kubelet/pods/14cecad5-f676-45e0-958e-2ba1779784d4/volumes"
Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.648850 4784 generic.go:334] "Generic (PLEG): container finished" podID="32caa57d-b418-4276-8718-f230dcfd282e" containerID="4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa" exitCode=143
Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.648901 4784 generic.go:334] "Generic (PLEG): container finished" podID="32caa57d-b418-4276-8718-f230dcfd282e" containerID="473a8ebf44fb7a38859d9d2f26168edc9d3b8babd2f658187f07c7dcf42224d5" exitCode=143
Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.648956 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerDied","Data":"4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa"}
event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerDied","Data":"4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa"} Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.648991 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerDied","Data":"473a8ebf44fb7a38859d9d2f26168edc9d3b8babd2f658187f07c7dcf42224d5"} Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.654762 4784 generic.go:334] "Generic (PLEG): container finished" podID="144afc20-0318-4972-8caa-87553550e6f4" containerID="b892bda747d1844e036df29f611eef0a6b2e2ba665ca456c4be278bab9a12eb9" exitCode=143 Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.654803 4784 generic.go:334] "Generic (PLEG): container finished" podID="144afc20-0318-4972-8caa-87553550e6f4" containerID="7e3b28acefa95b6051d4c2e6d724554038d9b3b64635dba321fd49c2bdcf0f01" exitCode=143 Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.654836 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerDied","Data":"b892bda747d1844e036df29f611eef0a6b2e2ba665ca456c4be278bab9a12eb9"} Jan 06 08:34:24 crc kubenswrapper[4784]: I0106 08:34:24.654873 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerDied","Data":"7e3b28acefa95b6051d4c2e6d724554038d9b3b64635dba321fd49c2bdcf0f01"} Jan 06 08:34:25 crc kubenswrapper[4784]: I0106 08:34:25.669455 4784 generic.go:334] "Generic (PLEG): container finished" podID="5a024bf3-6906-497e-9973-458b271505e1" containerID="4054deb074a112412430df78be1ca870064ab339204c29f58878b697b84964a7" exitCode=0 Jan 06 08:34:25 crc kubenswrapper[4784]: I0106 08:34:25.669511 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rvw" event={"ID":"5a024bf3-6906-497e-9973-458b271505e1","Type":"ContainerDied","Data":"4054deb074a112412430df78be1ca870064ab339204c29f58878b697b84964a7"} Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.128179 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.135318 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259663 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259714 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259736 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259757 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259806 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259828 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259852 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259888 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdmdk\" (UniqueName: \"kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259966 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.259998 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") " Jan 06 08:34:26 crc 
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.260038 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") "
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.260080 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") "
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.260104 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltqhj\" (UniqueName: \"kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj\") pod \"32caa57d-b418-4276-8718-f230dcfd282e\" (UID: \"32caa57d-b418-4276-8718-f230dcfd282e\") "
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.260121 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data\") pod \"144afc20-0318-4972-8caa-87553550e6f4\" (UID: \"144afc20-0318-4972-8caa-87553550e6f4\") "
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.261455 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.261597 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs" (OuterVolumeSpecName: "logs") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.262009 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs" (OuterVolumeSpecName: "logs") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.262181 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.278737 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.278910 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj" (OuterVolumeSpecName: "kube-api-access-ltqhj") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "kube-api-access-ltqhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.278933 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts" (OuterVolumeSpecName: "scripts") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.281678 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.291755 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk" (OuterVolumeSpecName: "kube-api-access-kdmdk") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "kube-api-access-kdmdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.292426 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts" (OuterVolumeSpecName: "scripts") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.301970 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.307424 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.335439 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data" (OuterVolumeSpecName: "config-data") pod "144afc20-0318-4972-8caa-87553550e6f4" (UID: "144afc20-0318-4972-8caa-87553550e6f4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.343415 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data" (OuterVolumeSpecName: "config-data") pod "32caa57d-b418-4276-8718-f230dcfd282e" (UID: "32caa57d-b418-4276-8718-f230dcfd282e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363123 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdmdk\" (UniqueName: \"kubernetes.io/projected/144afc20-0318-4972-8caa-87553550e6f4-kube-api-access-kdmdk\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363169 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363187 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363200 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363251 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363267 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltqhj\" (UniqueName: \"kubernetes.io/projected/32caa57d-b418-4276-8718-f230dcfd282e-kube-api-access-ltqhj\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363282 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363301 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363314 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/144afc20-0318-4972-8caa-87553550e6f4-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363326 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363348 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32caa57d-b418-4276-8718-f230dcfd282e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363363 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363374 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32caa57d-b418-4276-8718-f230dcfd282e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.363388 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/144afc20-0318-4972-8caa-87553550e6f4-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.383625 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.385507 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.465165 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.465208 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.718168 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.719171 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"32caa57d-b418-4276-8718-f230dcfd282e","Type":"ContainerDied","Data":"e5f81c47df6369e543674aff0ce2581a5d888d3125d90966c6ccde37040e2a85"} Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.719238 4784 scope.go:117] "RemoveContainer" containerID="4f8b6f23c39b3dff6c7d3b3fe54be3db7ad46df7629d55c704c3bf247a1f2efa" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.734906 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.735974 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"144afc20-0318-4972-8caa-87553550e6f4","Type":"ContainerDied","Data":"a6d71c76595addfe7c8bc0a8b89ab838066730225f0e4945288561cff6b7b5af"} Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.772939 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.817013 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.854911 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.870578 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.884396 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.884970 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14cecad5-f676-45e0-958e-2ba1779784d4" containerName="init" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.884992 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="14cecad5-f676-45e0-958e-2ba1779784d4" containerName="init" Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885005 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="dnsmasq-dns" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885012 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="dnsmasq-dns" Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885030 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-log" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885036 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-log" Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885048 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-httpd" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885057 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-httpd" Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885071 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="init" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885078 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="init" Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885089 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-httpd" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885096 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-httpd" Jan 06 08:34:26 crc kubenswrapper[4784]: 
Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.885107 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-log"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885113 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-log"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885288 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-log"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885303 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="14cecad5-f676-45e0-958e-2ba1779784d4" containerName="init"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885312 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-log"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885321 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="144afc20-0318-4972-8caa-87553550e6f4" containerName="glance-httpd"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885332 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b957b8c5-25c3-453a-bd68-bab5fd0007b3" containerName="dnsmasq-dns"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.885346 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="32caa57d-b418-4276-8718-f230dcfd282e" containerName="glance-httpd"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.886426 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.890531 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.890911 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.891040 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-8kx7t"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.891176 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.898005 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.900689 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.903678 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.903876 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.915483 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: I0106 08:34:26.932295 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:26 crc kubenswrapper[4784]: E0106 08:34:26.939995 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32caa57d_b418_4276_8718_f230dcfd282e.slice/crio-e5f81c47df6369e543674aff0ce2581a5d888d3125d90966c6ccde37040e2a85\": RecentStats: unable to find data in memory cache]" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023250 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023322 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023341 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023379 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023426 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023465 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc 
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023505 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023533 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvq6r\" (UniqueName: \"kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023682 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023704 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85nbb\" (UniqueName: \"kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023966 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.023987 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.024011 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.024026 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0"
Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.024099 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0"
\"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.024128 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126284 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126336 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvq6r\" (UniqueName: \"kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126382 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126403 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85nbb\" (UniqueName: \"kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126484 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126504 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126526 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126554 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts\") pod 
\"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126574 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126595 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126628 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126655 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126671 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126692 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126715 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.126734 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.127003 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") device mount path \"/mnt/openstack/pv02\"" 
pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.129685 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.130028 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.136151 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.136770 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.137475 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.137724 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.146644 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.148005 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.149332 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.149566 4784 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-85nbb\" (UniqueName: \"kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.155613 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.158046 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.158903 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.164657 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.166363 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvq6r\" (UniqueName: \"kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.178809 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.190152 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " pod="openstack/glance-default-external-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.219136 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:27 crc kubenswrapper[4784]: I0106 08:34:27.240245 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:34:28 crc kubenswrapper[4784]: I0106 08:34:28.334197 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="144afc20-0318-4972-8caa-87553550e6f4" path="/var/lib/kubelet/pods/144afc20-0318-4972-8caa-87553550e6f4/volumes" Jan 06 08:34:28 crc kubenswrapper[4784]: I0106 08:34:28.339231 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32caa57d-b418-4276-8718-f230dcfd282e" path="/var/lib/kubelet/pods/32caa57d-b418-4276-8718-f230dcfd282e/volumes" Jan 06 08:34:29 crc kubenswrapper[4784]: I0106 08:34:29.301856 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:29 crc kubenswrapper[4784]: I0106 08:34:29.365026 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"] Jan 06 08:34:29 crc kubenswrapper[4784]: I0106 08:34:29.365381 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" containerID="cri-o://fa0c569e23b0406cfe8984fe9a3315da9da5b3978a4f85113647cad15c7813b3" gracePeriod=10 Jan 06 08:34:30 crc kubenswrapper[4784]: I0106 08:34:30.786698 4784 generic.go:334] "Generic (PLEG): container finished" podID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerID="fa0c569e23b0406cfe8984fe9a3315da9da5b3978a4f85113647cad15c7813b3" exitCode=0 Jan 06 08:34:30 crc kubenswrapper[4784]: I0106 08:34:30.787696 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" event={"ID":"8a90527d-4de4-42bf-9661-f4fe0bd16579","Type":"ContainerDied","Data":"fa0c569e23b0406cfe8984fe9a3315da9da5b3978a4f85113647cad15c7813b3"} Jan 06 08:34:32 crc kubenswrapper[4784]: I0106 08:34:32.485055 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.130:5353: connect: connection refused" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.154698 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.257824 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.257900 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.258052 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgx57\" (UniqueName: \"kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.258079 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-config-data\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.258157 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.259400 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys\") pod \"5a024bf3-6906-497e-9973-458b271505e1\" (UID: \"5a024bf3-6906-497e-9973-458b271505e1\") " Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.266964 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57" (OuterVolumeSpecName: "kube-api-access-bgx57") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "kube-api-access-bgx57". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.267089 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts" (OuterVolumeSpecName: "scripts") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.268287 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.287919 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.288307 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-config-data" (OuterVolumeSpecName: "config-data") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.303665 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a024bf3-6906-497e-9973-458b271505e1" (UID: "5a024bf3-6906-497e-9973-458b271505e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370815 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgx57\" (UniqueName: \"kubernetes.io/projected/5a024bf3-6906-497e-9973-458b271505e1-kube-api-access-bgx57\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370879 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370908 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370925 4784 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370945 4784 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.370960 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a024bf3-6906-497e-9973-458b271505e1-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:33 crc kubenswrapper[4784]: E0106 08:34:33.656415 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16" Jan 06 08:34:33 crc kubenswrapper[4784]: E0106 08:34:33.656674 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p9nfl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-7qsrr_openstack(0dd652b3-9755-47c0-a4cc-c39c86d840f3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 06 08:34:33 crc kubenswrapper[4784]: E0106 08:34:33.657930 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-7qsrr" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.816866 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rvw" event={"ID":"5a024bf3-6906-497e-9973-458b271505e1","Type":"ContainerDied","Data":"d74639cd72fa68c9bddabbcb594ed86110e170102e8ecb584a87c634f5db2874"} Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.816947 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d74639cd72fa68c9bddabbcb594ed86110e170102e8ecb584a87c634f5db2874" Jan 06 08:34:33 crc kubenswrapper[4784]: I0106 08:34:33.816904 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m2rvw" Jan 06 08:34:33 crc kubenswrapper[4784]: E0106 08:34:33.820033 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16\\\"\"" pod="openstack/barbican-db-sync-7qsrr" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.244710 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-m2rvw"] Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.254309 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-m2rvw"] Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.329749 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a024bf3-6906-497e-9973-458b271505e1" path="/var/lib/kubelet/pods/5a024bf3-6906-497e-9973-458b271505e1/volumes" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.352168 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-fhdrv"] Jan 06 08:34:34 crc kubenswrapper[4784]: E0106 08:34:34.353007 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a024bf3-6906-497e-9973-458b271505e1" containerName="keystone-bootstrap" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.353190 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a024bf3-6906-497e-9973-458b271505e1" containerName="keystone-bootstrap" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.353495 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a024bf3-6906-497e-9973-458b271505e1" containerName="keystone-bootstrap" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.354321 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.359595 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.360892 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.361166 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.361301 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.361439 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lgkm4" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.368586 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fhdrv"] Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.390981 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.391388 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.391481 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.391654 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.391744 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.391819 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tksvr\" (UniqueName: \"kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.493841 4784 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.494581 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.494679 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.494776 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.494925 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.495004 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tksvr\" (UniqueName: \"kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.500211 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.500393 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.501268 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.501703 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys\") pod \"keystone-bootstrap-fhdrv\" (UID: 
\"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.508242 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.516092 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tksvr\" (UniqueName: \"kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr\") pod \"keystone-bootstrap-fhdrv\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:34 crc kubenswrapper[4784]: I0106 08:34:34.684275 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:37 crc kubenswrapper[4784]: I0106 08:34:37.484418 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.130:5353: connect: connection refused" Jan 06 08:34:42 crc kubenswrapper[4784]: I0106 08:34:42.468874 4784 scope.go:117] "RemoveContainer" containerID="473a8ebf44fb7a38859d9d2f26168edc9d3b8babd2f658187f07c7dcf42224d5" Jan 06 08:34:42 crc kubenswrapper[4784]: I0106 08:34:42.485093 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.130:5353: connect: connection refused" Jan 06 08:34:42 crc kubenswrapper[4784]: I0106 08:34:42.485270 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" Jan 06 08:34:43 crc kubenswrapper[4784]: E0106 08:34:43.591613 4784 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49" Jan 06 08:34:43 crc kubenswrapper[4784]: E0106 08:34:43.592783 4784 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cfbrd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-57tfg_openstack(a29c392c-7fa2-4a80-b072-92b8201616b8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 06 08:34:43 crc kubenswrapper[4784]: E0106 08:34:43.594011 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-57tfg" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.595783 4784 scope.go:117] "RemoveContainer" containerID="b892bda747d1844e036df29f611eef0a6b2e2ba665ca456c4be278bab9a12eb9" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.765931 4784 scope.go:117] "RemoveContainer" containerID="7e3b28acefa95b6051d4c2e6d724554038d9b3b64635dba321fd49c2bdcf0f01" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.818701 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904092 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904310 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904359 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904427 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904478 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sc6b\" (UniqueName: \"kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.904498 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb\") pod \"8a90527d-4de4-42bf-9661-f4fe0bd16579\" (UID: \"8a90527d-4de4-42bf-9661-f4fe0bd16579\") " Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.922743 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b" (OuterVolumeSpecName: "kube-api-access-9sc6b") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "kube-api-access-9sc6b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.948394 4784 generic.go:334] "Generic (PLEG): container finished" podID="3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" containerID="3737d9c9a4f7a004d041d4ac5573b4a1033d115a765b7915b0da0f343e6743af" exitCode=0 Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.948442 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j4gnj" event={"ID":"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf","Type":"ContainerDied","Data":"3737d9c9a4f7a004d041d4ac5573b4a1033d115a765b7915b0da0f343e6743af"} Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.950062 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerStarted","Data":"dec4e7e92f5454f23c4c06e7dde325f898abb232e1ee598894529e931f51bf12"} Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.951886 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9bdd7" event={"ID":"7a172939-8d39-40a2-9b41-1eee48de7be5","Type":"ContainerStarted","Data":"253b84f8fc7e05254da0d6eb160f615eb44959aa74159d0d375915b64cc99b79"} Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.956047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" event={"ID":"8a90527d-4de4-42bf-9661-f4fe0bd16579","Type":"ContainerDied","Data":"9371bc3a7dd6e25a85e28117bf50e8dc8c8580b5c85b127d3a055ae8f411de38"} Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.956198 4784 scope.go:117] "RemoveContainer" containerID="fa0c569e23b0406cfe8984fe9a3315da9da5b3978a4f85113647cad15c7813b3" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.956094 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-w6jbq" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.970644 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config" (OuterVolumeSpecName: "config") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:43 crc kubenswrapper[4784]: E0106 08:34:43.972759 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-57tfg" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" Jan 06 08:34:43 crc kubenswrapper[4784]: I0106 08:34:43.973583 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.008833 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.008878 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.008894 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sc6b\" (UniqueName: \"kubernetes.io/projected/8a90527d-4de4-42bf-9661-f4fe0bd16579-kube-api-access-9sc6b\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.039452 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.054252 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.054514 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-9bdd7" podStartSLOduration=2.944780321 podStartE2EDuration="26.054485063s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="2026-01-06 08:34:20.445373432 +0000 UTC m=+1162.491546269" lastFinishedPulling="2026-01-06 08:34:43.555078174 +0000 UTC m=+1185.601251011" observedRunningTime="2026-01-06 08:34:44.023899509 +0000 UTC m=+1186.070072346" watchObservedRunningTime="2026-01-06 08:34:44.054485063 +0000 UTC m=+1186.100657900" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.060080 4784 scope.go:117] "RemoveContainer" containerID="e8d0d25462ae532a0960554c07ee1eaae3e113a0e4ad88137d918acd3b67d4fa" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.061074 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8a90527d-4de4-42bf-9661-f4fe0bd16579" (UID: "8a90527d-4de4-42bf-9661-f4fe0bd16579"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.110830 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.110866 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.110879 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a90527d-4de4-42bf-9661-f4fe0bd16579-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.173457 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fhdrv"] Jan 06 08:34:44 crc kubenswrapper[4784]: W0106 08:34:44.176663 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ec97fe8_08d9_4c08_8f59_7f2ee13de3b3.slice/crio-cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e WatchSource:0}: Error finding container cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e: Status 404 returned error can't find the container with id cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e Jan 06 08:34:44 crc kubenswrapper[4784]: W0106 08:34:44.179724 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1b66b45_3ef2_49d9_aa09_490c73fa86aa.slice/crio-a769b546a27590185a95af6106dbdcbf851665b3e6e8e2f3897605fcb60e0a61 WatchSource:0}: Error finding container a769b546a27590185a95af6106dbdcbf851665b3e6e8e2f3897605fcb60e0a61: Status 404 returned error can't find the container with id a769b546a27590185a95af6106dbdcbf851665b3e6e8e2f3897605fcb60e0a61 Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.181879 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.344262 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"] Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.350816 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.350883 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.359602 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-w6jbq"] Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.990649 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fhdrv" 
event={"ID":"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3","Type":"ContainerStarted","Data":"d5c3b203e197556bb2274e3877734a79c06ae2fda8e58528211f22a31d41b9ad"} Jan 06 08:34:44 crc kubenswrapper[4784]: I0106 08:34:44.991800 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fhdrv" event={"ID":"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3","Type":"ContainerStarted","Data":"cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e"} Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.002346 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerStarted","Data":"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91"} Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.002417 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerStarted","Data":"a769b546a27590185a95af6106dbdcbf851665b3e6e8e2f3897605fcb60e0a61"} Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.035605 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-fhdrv" podStartSLOduration=11.035571345 podStartE2EDuration="11.035571345s" podCreationTimestamp="2026-01-06 08:34:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:45.019013989 +0000 UTC m=+1187.065186816" watchObservedRunningTime="2026-01-06 08:34:45.035571345 +0000 UTC m=+1187.081744182" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.082328 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.363153 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.463973 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scm79\" (UniqueName: \"kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79\") pod \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.464016 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle\") pod \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.464161 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config\") pod \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\" (UID: \"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf\") " Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.470488 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79" (OuterVolumeSpecName: "kube-api-access-scm79") pod "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" (UID: "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf"). InnerVolumeSpecName "kube-api-access-scm79". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.492865 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" (UID: "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.496773 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config" (OuterVolumeSpecName: "config") pod "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" (UID: "3f6f606e-fa4d-45d8-95a1-04a052ca0dbf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.569572 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.569626 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scm79\" (UniqueName: \"kubernetes.io/projected/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-kube-api-access-scm79\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:45 crc kubenswrapper[4784]: I0106 08:34:45.569640 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.036290 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerStarted","Data":"0a80bcc5fb85ee08a7f09e9a98db4434401965e050e8d816afb93ea83870ade1"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.036779 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerStarted","Data":"5da4fb387f7cf2f4d53ccb440e69652edc954f6d4926f8e97aada01737359b9e"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.040714 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerStarted","Data":"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.074972 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j4gnj" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.075012 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j4gnj" event={"ID":"3f6f606e-fa4d-45d8-95a1-04a052ca0dbf","Type":"ContainerDied","Data":"f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.075620 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f56a19529c602f97f5df85278e41f96a5ab07e38da20256fa3a6266e694b4931" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.089239 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=20.089214011 podStartE2EDuration="20.089214011s" podCreationTimestamp="2026-01-06 08:34:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:46.088373735 +0000 UTC m=+1188.134546562" watchObservedRunningTime="2026-01-06 08:34:46.089214011 +0000 UTC m=+1188.135386848" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.102839 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerStarted","Data":"7bf3ceb793765a3adbf7e614d8742cab8b27c07326767e513445beacda8adc11"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.107797 4784 generic.go:334] "Generic (PLEG): container finished" podID="7a172939-8d39-40a2-9b41-1eee48de7be5" containerID="253b84f8fc7e05254da0d6eb160f615eb44959aa74159d0d375915b64cc99b79" exitCode=0 Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.108781 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9bdd7" event={"ID":"7a172939-8d39-40a2-9b41-1eee48de7be5","Type":"ContainerDied","Data":"253b84f8fc7e05254da0d6eb160f615eb44959aa74159d0d375915b64cc99b79"} Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.170852 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:34:46 crc kubenswrapper[4784]: E0106 08:34:46.171270 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="init" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.171288 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="init" Jan 06 08:34:46 crc kubenswrapper[4784]: E0106 08:34:46.171311 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.171318 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" Jan 06 08:34:46 crc kubenswrapper[4784]: E0106 08:34:46.171340 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" containerName="neutron-db-sync" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.171347 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" containerName="neutron-db-sync" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.171502 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" containerName="neutron-db-sync" Jan 06 
08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.171516 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" containerName="dnsmasq-dns" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.172395 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.195591 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.297498 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvkgc\" (UniqueName: \"kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.297604 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.297660 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.298281 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.298373 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.298504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.361364 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a90527d-4de4-42bf-9661-f4fe0bd16579" path="/var/lib/kubelet/pods/8a90527d-4de4-42bf-9661-f4fe0bd16579/volumes" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.362057 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.365215 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 
06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.365323 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.370107 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.370557 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.370686 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.370827 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-88lbj" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400119 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400203 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400235 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400275 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400334 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400362 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400415 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvkgc\" (UniqueName: \"kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " 
pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400446 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400470 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400489 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr6p2\" (UniqueName: \"kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.400528 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.401187 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.402823 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.402866 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.403158 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.403364 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc 
kubenswrapper[4784]: I0106 08:34:46.423336 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvkgc\" (UniqueName: \"kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc\") pod \"dnsmasq-dns-7bb67c87c9-cfl2q\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.502473 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.502573 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.502662 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr6p2\" (UniqueName: \"kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.502764 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.502795 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.508835 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.509920 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.509975 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.510364 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.512477 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.525124 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr6p2\" (UniqueName: \"kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2\") pod \"neutron-646fd5ff66-md6q9\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:46 crc kubenswrapper[4784]: I0106 08:34:46.694462 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.240786 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.242817 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.267133 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.290875 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.305232 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 06 08:34:47 crc kubenswrapper[4784]: W0106 08:34:47.315672 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb378c55_6158_463a_8c26_eaef2c173cc5.slice/crio-14b9b5e07b81973e1a3bc1a44f9f66a2da8de46a3b65f94352d7252b1ff9264a WatchSource:0}: Error finding container 14b9b5e07b81973e1a3bc1a44f9f66a2da8de46a3b65f94352d7252b1ff9264a: Status 404 returned error can't find the container with id 14b9b5e07b81973e1a3bc1a44f9f66a2da8de46a3b65f94352d7252b1ff9264a Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.437678 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 06 08:34:47 crc kubenswrapper[4784]: W0106 08:34:47.450820 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98f474cd_143e_4f7b_8269_eca3e3c1b6b0.slice/crio-ee514d52ca3bcc001361d3fb34dab619eb21ccd57fb8b3f0226a1fa0d5e3a0af WatchSource:0}: Error finding container ee514d52ca3bcc001361d3fb34dab619eb21ccd57fb8b3f0226a1fa0d5e3a0af: Status 404 returned error can't find the container with id ee514d52ca3bcc001361d3fb34dab619eb21ccd57fb8b3f0226a1fa0d5e3a0af Jan 06 08:34:47 crc kubenswrapper[4784]: I0106 08:34:47.866224 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.049664 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data\") pod \"7a172939-8d39-40a2-9b41-1eee48de7be5\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.049838 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wptm4\" (UniqueName: \"kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4\") pod \"7a172939-8d39-40a2-9b41-1eee48de7be5\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.049901 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs\") pod \"7a172939-8d39-40a2-9b41-1eee48de7be5\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.050044 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle\") pod \"7a172939-8d39-40a2-9b41-1eee48de7be5\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.050159 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts\") pod \"7a172939-8d39-40a2-9b41-1eee48de7be5\" (UID: \"7a172939-8d39-40a2-9b41-1eee48de7be5\") " Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.051477 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs" (OuterVolumeSpecName: "logs") pod "7a172939-8d39-40a2-9b41-1eee48de7be5" (UID: "7a172939-8d39-40a2-9b41-1eee48de7be5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.051867 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a172939-8d39-40a2-9b41-1eee48de7be5-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.060558 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts" (OuterVolumeSpecName: "scripts") pod "7a172939-8d39-40a2-9b41-1eee48de7be5" (UID: "7a172939-8d39-40a2-9b41-1eee48de7be5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.075210 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4" (OuterVolumeSpecName: "kube-api-access-wptm4") pod "7a172939-8d39-40a2-9b41-1eee48de7be5" (UID: "7a172939-8d39-40a2-9b41-1eee48de7be5"). InnerVolumeSpecName "kube-api-access-wptm4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.097644 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data" (OuterVolumeSpecName: "config-data") pod "7a172939-8d39-40a2-9b41-1eee48de7be5" (UID: "7a172939-8d39-40a2-9b41-1eee48de7be5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.110903 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a172939-8d39-40a2-9b41-1eee48de7be5" (UID: "7a172939-8d39-40a2-9b41-1eee48de7be5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.153540 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wptm4\" (UniqueName: \"kubernetes.io/projected/7a172939-8d39-40a2-9b41-1eee48de7be5-kube-api-access-wptm4\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.153584 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.153595 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.153605 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a172939-8d39-40a2-9b41-1eee48de7be5-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.160332 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerStarted","Data":"71de650cad0dde73291bd080b9ece2d17f9c3a722f41f857e748759314ce259a"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.160384 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerStarted","Data":"ee514d52ca3bcc001361d3fb34dab619eb21ccd57fb8b3f0226a1fa0d5e3a0af"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.164463 4784 generic.go:334] "Generic (PLEG): container finished" podID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerID="9e938fe53de519b5ef7c9e8139bb950085c933cd19f796332bd04026db444dd6" exitCode=0 Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.164520 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" event={"ID":"cb378c55-6158-463a-8c26-eaef2c173cc5","Type":"ContainerDied","Data":"9e938fe53de519b5ef7c9e8139bb950085c933cd19f796332bd04026db444dd6"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.164540 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" event={"ID":"cb378c55-6158-463a-8c26-eaef2c173cc5","Type":"ContainerStarted","Data":"14b9b5e07b81973e1a3bc1a44f9f66a2da8de46a3b65f94352d7252b1ff9264a"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 
08:34:48.183347 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9bdd7" event={"ID":"7a172939-8d39-40a2-9b41-1eee48de7be5","Type":"ContainerDied","Data":"9354db582338ecaf727558cd89d7e198f05e68cdf1cfa63df21d666b6b647da8"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.183407 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9354db582338ecaf727558cd89d7e198f05e68cdf1cfa63df21d666b6b647da8" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.183506 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-9bdd7" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.204191 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerStarted","Data":"fd83bbd26fefdde8520038bbfa17bc570843c33022e2fb92a18c631bd3f1d88c"} Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.204456 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.204741 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.284253 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=22.284228467 podStartE2EDuration="22.284228467s" podCreationTimestamp="2026-01-06 08:34:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:48.259603739 +0000 UTC m=+1190.305776576" watchObservedRunningTime="2026-01-06 08:34:48.284228467 +0000 UTC m=+1190.330401304" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.371922 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"] Jan 06 08:34:48 crc kubenswrapper[4784]: E0106 08:34:48.372452 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a172939-8d39-40a2-9b41-1eee48de7be5" containerName="placement-db-sync" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.372472 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a172939-8d39-40a2-9b41-1eee48de7be5" containerName="placement-db-sync" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.372698 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a172939-8d39-40a2-9b41-1eee48de7be5" containerName="placement-db-sync" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.373742 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.390182 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-2vb5p" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.390424 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.390513 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.390603 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.390671 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.405853 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"] Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.483969 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484088 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484221 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484247 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484274 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484334 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm6qc\" (UniqueName: \"kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.484426 4784 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.585774 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586327 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586356 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586402 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm6qc\" (UniqueName: \"kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586446 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586483 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.586531 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.590719 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.596422 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.600128 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.606459 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.607470 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.608182 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.615172 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm6qc\" (UniqueName: \"kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc\") pod \"placement-5759d5cbc4-2r87d\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.707316 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.882643 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"] Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.884616 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.893046 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.901098 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"] Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.901320 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.996398 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.996917 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmdjs\" (UniqueName: \"kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.996945 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.996986 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.997004 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.998403 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:48 crc kubenswrapper[4784]: I0106 08:34:48.998455 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100568 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100626 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmdjs\" (UniqueName: \"kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100658 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100709 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100737 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100777 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.100809 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.108792 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.111085 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.111124 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: 
\"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.112083 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.113421 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.127327 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.140235 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmdjs\" (UniqueName: \"kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs\") pod \"neutron-849db5db7c-vjb4f\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.221854 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerStarted","Data":"5c0f8bf209994f84889c0f824711560eeddf5feb2740b5e35a651ac009c166d4"} Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.221996 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.223742 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.227297 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" event={"ID":"cb378c55-6158-463a-8c26-eaef2c173cc5","Type":"ContainerStarted","Data":"8472cd9de42ff397eff4358dec557a1905e937e60b1dbc897a83a21ef79ce87b"} Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.227454 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.229063 4784 generic.go:334] "Generic (PLEG): container finished" podID="5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" containerID="d5c3b203e197556bb2274e3877734a79c06ae2fda8e58528211f22a31d41b9ad" exitCode=0 Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.230436 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fhdrv" event={"ID":"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3","Type":"ContainerDied","Data":"d5c3b203e197556bb2274e3877734a79c06ae2fda8e58528211f22a31d41b9ad"} Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.254340 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-646fd5ff66-md6q9" podStartSLOduration=3.254312287 podStartE2EDuration="3.254312287s" podCreationTimestamp="2026-01-06 08:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:49.239901688 +0000 UTC m=+1191.286074525" watchObservedRunningTime="2026-01-06 08:34:49.254312287 +0000 UTC m=+1191.300485124" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.312410 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" podStartSLOduration=3.312384088 podStartE2EDuration="3.312384088s" podCreationTimestamp="2026-01-06 08:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:49.279225144 +0000 UTC m=+1191.325397981" watchObservedRunningTime="2026-01-06 08:34:49.312384088 +0000 UTC m=+1191.358556925" Jan 06 08:34:49 crc kubenswrapper[4784]: I0106 08:34:49.333185 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"] Jan 06 08:34:51 crc kubenswrapper[4784]: I0106 08:34:51.629702 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 06 08:34:51 crc kubenswrapper[4784]: I0106 08:34:51.638750 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 06 08:34:53 crc kubenswrapper[4784]: W0106 08:34:53.930010 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd2b801_83a4_410f_a555_8dfda270713a.slice/crio-9eac8dbb220c2ed526d704e524ff84da3f345205b87705c844159d8cfa7b7a7b WatchSource:0}: Error finding container 9eac8dbb220c2ed526d704e524ff84da3f345205b87705c844159d8cfa7b7a7b: Status 404 returned error can't find the container with id 9eac8dbb220c2ed526d704e524ff84da3f345205b87705c844159d8cfa7b7a7b Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.181358 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231132 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231289 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231326 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tksvr\" (UniqueName: \"kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231469 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231588 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.231643 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys\") pod \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\" (UID: \"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3\") " Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.246344 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr" (OuterVolumeSpecName: "kube-api-access-tksvr") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "kube-api-access-tksvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.259793 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts" (OuterVolumeSpecName: "scripts") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.306260 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.314423 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.343135 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tksvr\" (UniqueName: \"kubernetes.io/projected/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-kube-api-access-tksvr\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.343166 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.343179 4784 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.343187 4784 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.358818 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data" (OuterVolumeSpecName: "config-data") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.389296 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" (UID: "5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.395850 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fhdrv" event={"ID":"5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3","Type":"ContainerDied","Data":"cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e"} Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.395916 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf51f70e58c5fff888c98daf69cde62ef9e21ec1bb3c0fb0091ab6b74d46831e" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.396682 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fhdrv" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.403529 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerStarted","Data":"9eac8dbb220c2ed526d704e524ff84da3f345205b87705c844159d8cfa7b7a7b"} Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.444862 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.444909 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:54 crc kubenswrapper[4784]: I0106 08:34:54.834982 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"] Jan 06 08:34:54 crc kubenswrapper[4784]: W0106 08:34:54.835769 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbed6a7b9_0069_4ea7_b813_70a5808d18db.slice/crio-f1c29aa93b8ef6ee31b56b049827679ac1ca8e301f311aa2da73ec0a93b0f137 WatchSource:0}: Error finding container f1c29aa93b8ef6ee31b56b049827679ac1ca8e301f311aa2da73ec0a93b0f137: Status 404 returned error can't find the container with id f1c29aa93b8ef6ee31b56b049827679ac1ca8e301f311aa2da73ec0a93b0f137 Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.355696 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"] Jan 06 08:34:55 crc kubenswrapper[4784]: E0106 08:34:55.356844 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" containerName="keystone-bootstrap" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.356865 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" containerName="keystone-bootstrap" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.357105 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" containerName="keystone-bootstrap" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.357999 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.362907 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.363849 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.366211 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lgkm4" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.366305 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.366419 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.366937 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.372826 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"] Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.441726 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerStarted","Data":"d5faeb1a613a5c8eebd6a7437055973ba171c7ef9929ddbd631522a15773dd98"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.447149 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-849db5db7c-vjb4f" event={"ID":"bed6a7b9-0069-4ea7-b813-70a5808d18db","Type":"ContainerStarted","Data":"3dbabc5c6ed9e67890a74683afbfe123a07fdb5e9564f6fd5fe7f8edcd519e4c"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.447199 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-849db5db7c-vjb4f" event={"ID":"bed6a7b9-0069-4ea7-b813-70a5808d18db","Type":"ContainerStarted","Data":"f1c29aa93b8ef6ee31b56b049827679ac1ca8e301f311aa2da73ec0a93b0f137"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.448101 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.457800 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerStarted","Data":"7edfdbc25e2ee69a5e5b15b59c0c29dcf7e91b34fc5883ef77bb32176ff55cd7"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.457868 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerStarted","Data":"07f60e1630bdcd2399c45169de65854f62b21633e41ebfe502b28d51d39226cc"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.458630 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.458843 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.463906 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7qsrr" 
event={"ID":"0dd652b3-9755-47c0-a4cc-c39c86d840f3","Type":"ContainerStarted","Data":"feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba"} Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465009 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465064 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465116 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465193 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465217 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr2tg\" (UniqueName: \"kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465419 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465450 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.465561 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.480132 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-849db5db7c-vjb4f" 
podStartSLOduration=7.480108905 podStartE2EDuration="7.480108905s" podCreationTimestamp="2026-01-06 08:34:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:55.479679921 +0000 UTC m=+1197.525852758" watchObservedRunningTime="2026-01-06 08:34:55.480108905 +0000 UTC m=+1197.526281742"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.525952 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-7qsrr" podStartSLOduration=3.956581952 podStartE2EDuration="37.525930943s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="2026-01-06 08:34:20.431873032 +0000 UTC m=+1162.478045869" lastFinishedPulling="2026-01-06 08:34:54.001222023 +0000 UTC m=+1196.047394860" observedRunningTime="2026-01-06 08:34:55.522074683 +0000 UTC m=+1197.568247520" watchObservedRunningTime="2026-01-06 08:34:55.525930943 +0000 UTC m=+1197.572103780"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568206 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568404 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr2tg\" (UniqueName: \"kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568563 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568610 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568686 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568758 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568835 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.568892 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.571833 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5759d5cbc4-2r87d" podStartSLOduration=7.571804783 podStartE2EDuration="7.571804783s" podCreationTimestamp="2026-01-06 08:34:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:55.562039258 +0000 UTC m=+1197.608212095" watchObservedRunningTime="2026-01-06 08:34:55.571804783 +0000 UTC m=+1197.617977620"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.581422 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.591841 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.593401 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.593406 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.593798 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.593907 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.594355 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.600120 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr2tg\" (UniqueName: \"kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg\") pod \"keystone-5c58bd8cfd-bjvgd\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:55 crc kubenswrapper[4784]: I0106 08:34:55.676338 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.283318 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"]
Jan 06 08:34:56 crc kubenswrapper[4784]: W0106 08:34:56.307531 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47f75a1e_4d3b_4460_8420_05ac7e981c8e.slice/crio-2f3bb16f1555024f2cc7a9259fc9047d35325a194ca9389b1c3085531a670606 WatchSource:0}: Error finding container 2f3bb16f1555024f2cc7a9259fc9047d35325a194ca9389b1c3085531a670606: Status 404 returned error can't find the container with id 2f3bb16f1555024f2cc7a9259fc9047d35325a194ca9389b1c3085531a670606
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.479724 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c58bd8cfd-bjvgd" event={"ID":"47f75a1e-4d3b-4460-8420-05ac7e981c8e","Type":"ContainerStarted","Data":"2f3bb16f1555024f2cc7a9259fc9047d35325a194ca9389b1c3085531a670606"}
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.487256 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-849db5db7c-vjb4f" event={"ID":"bed6a7b9-0069-4ea7-b813-70a5808d18db","Type":"ContainerStarted","Data":"b771c84b2fee4dfd30eea462159329d4aa16e7a25274eb645e61e328c8f50840"}
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.518154 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q"
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.615601 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"]
Jan 06 08:34:56 crc kubenswrapper[4784]: I0106 08:34:56.634669 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66567888d7-whnpc" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="dnsmasq-dns" containerID="cri-o://39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c" gracePeriod=10
Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.220834 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.221352 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.221368 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.221380 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.269459 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.278961 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.328038 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431229 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431326 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431459 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431555 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431589 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tq9r\" (UniqueName: \"kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.431640 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb\") pod \"977e57a8-a139-41d0-b757-2ea7134bd9ce\" (UID: \"977e57a8-a139-41d0-b757-2ea7134bd9ce\") " Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.485918 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r" (OuterVolumeSpecName: "kube-api-access-4tq9r") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "kube-api-access-4tq9r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.541402 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tq9r\" (UniqueName: \"kubernetes.io/projected/977e57a8-a139-41d0-b757-2ea7134bd9ce-kube-api-access-4tq9r\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.564925 4784 generic.go:334] "Generic (PLEG): container finished" podID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerID="39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c" exitCode=0 Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.565262 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-whnpc" event={"ID":"977e57a8-a139-41d0-b757-2ea7134bd9ce","Type":"ContainerDied","Data":"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c"} Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.565346 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-whnpc" event={"ID":"977e57a8-a139-41d0-b757-2ea7134bd9ce","Type":"ContainerDied","Data":"9e2691a4b8901eb304fc753250d138432e90c6b727497c1c620e2b0f2d603413"} Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.565449 4784 scope.go:117] "RemoveContainer" containerID="39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.565689 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-whnpc" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.593082 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c58bd8cfd-bjvgd" event={"ID":"47f75a1e-4d3b-4460-8420-05ac7e981c8e","Type":"ContainerStarted","Data":"4223cd132ca6515fc76e29d4b62d62f23ca0125b03efe4932036e3c3b22ddecd"} Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.593742 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.606705 4784 scope.go:117] "RemoveContainer" containerID="d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.665301 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.666873 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config" (OuterVolumeSpecName: "config") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.672896 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5c58bd8cfd-bjvgd" podStartSLOduration=2.672866789 podStartE2EDuration="2.672866789s" podCreationTimestamp="2026-01-06 08:34:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:34:57.626627978 +0000 UTC m=+1199.672800815" watchObservedRunningTime="2026-01-06 08:34:57.672866789 +0000 UTC m=+1199.719039626" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.675612 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.676138 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.679296 4784 scope.go:117] "RemoveContainer" containerID="39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c" Jan 06 08:34:57 crc kubenswrapper[4784]: E0106 08:34:57.681017 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c\": container with ID starting with 39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c not found: ID does not exist" containerID="39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.681066 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c"} err="failed to get container status \"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c\": rpc error: code = NotFound desc = could not find container \"39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c\": container with ID starting with 39de4ffa0dfca0f8dd609c556de3fc7cbe886700a71bd19ba29da9492225304c not found: ID does not exist" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.681096 4784 scope.go:117] "RemoveContainer" containerID="d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7" Jan 06 08:34:57 crc kubenswrapper[4784]: E0106 08:34:57.682010 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7\": container with ID starting with d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7 not found: ID does not exist" containerID="d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.682029 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7"} err="failed to get container status \"d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7\": rpc error: code = NotFound desc = could not find container \"d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7\": container with ID starting with d565550505f3c74fbef04eebd288d2476e838f88f2b128a1c88d13d963e0b4c7 not found: ID does not exist" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.697220 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "977e57a8-a139-41d0-b757-2ea7134bd9ce" (UID: "977e57a8-a139-41d0-b757-2ea7134bd9ce"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.758006 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.758136 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.758367 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.758473 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.758535 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/977e57a8-a139-41d0-b757-2ea7134bd9ce-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:34:57 crc kubenswrapper[4784]: E0106 08:34:57.897009 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dd652b3_9755_47c0_a4cc_c39c86d840f3.slice/crio-feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dd652b3_9755_47c0_a4cc_c39c86d840f3.slice/crio-conmon-feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba.scope\": RecentStats: unable to find data in memory cache]" Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.930630 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"] Jan 06 08:34:57 crc kubenswrapper[4784]: I0106 08:34:57.942357 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-whnpc"] Jan 06 08:34:58 crc kubenswrapper[4784]: I0106 08:34:58.354055 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" path="/var/lib/kubelet/pods/977e57a8-a139-41d0-b757-2ea7134bd9ce/volumes" Jan 06 08:34:58 crc kubenswrapper[4784]: I0106 08:34:58.610133 4784 generic.go:334] "Generic (PLEG): 
container finished" podID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" containerID="feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba" exitCode=0 Jan 06 08:34:58 crc kubenswrapper[4784]: I0106 08:34:58.610223 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7qsrr" event={"ID":"0dd652b3-9755-47c0-a4cc-c39c86d840f3","Type":"ContainerDied","Data":"feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba"} Jan 06 08:34:59 crc kubenswrapper[4784]: I0106 08:34:59.622426 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-57tfg" event={"ID":"a29c392c-7fa2-4a80-b072-92b8201616b8","Type":"ContainerStarted","Data":"2d24fcba65829c8263c5636ac218b33b703ab3269bed25bdf38a8d29dd40237d"} Jan 06 08:34:59 crc kubenswrapper[4784]: I0106 08:34:59.650258 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-57tfg" podStartSLOduration=2.893508062 podStartE2EDuration="41.65023456s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="2026-01-06 08:34:20.043232177 +0000 UTC m=+1162.089405014" lastFinishedPulling="2026-01-06 08:34:58.799958675 +0000 UTC m=+1200.846131512" observedRunningTime="2026-01-06 08:34:59.648354732 +0000 UTC m=+1201.694527569" watchObservedRunningTime="2026-01-06 08:34:59.65023456 +0000 UTC m=+1201.696407397" Jan 06 08:34:59 crc kubenswrapper[4784]: I0106 08:34:59.730124 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 06 08:34:59 crc kubenswrapper[4784]: I0106 08:34:59.730224 4784 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 06 08:34:59 crc kubenswrapper[4784]: I0106 08:34:59.812767 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.826880 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.927757 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9nfl\" (UniqueName: \"kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl\") pod \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.927942 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data\") pod \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.928134 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle\") pod \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\" (UID: \"0dd652b3-9755-47c0-a4cc-c39c86d840f3\") " Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.935779 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl" (OuterVolumeSpecName: "kube-api-access-p9nfl") pod "0dd652b3-9755-47c0-a4cc-c39c86d840f3" (UID: "0dd652b3-9755-47c0-a4cc-c39c86d840f3"). 
InnerVolumeSpecName "kube-api-access-p9nfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.935841 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0dd652b3-9755-47c0-a4cc-c39c86d840f3" (UID: "0dd652b3-9755-47c0-a4cc-c39c86d840f3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:04 crc kubenswrapper[4784]: I0106 08:35:04.977311 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dd652b3-9755-47c0-a4cc-c39c86d840f3" (UID: "0dd652b3-9755-47c0-a4cc-c39c86d840f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.030915 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.030961 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9nfl\" (UniqueName: \"kubernetes.io/projected/0dd652b3-9755-47c0-a4cc-c39c86d840f3-kube-api-access-p9nfl\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.030979 4784 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0dd652b3-9755-47c0-a4cc-c39c86d840f3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.715880 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerStarted","Data":"129f03cc7dd5ce9eaaeb5161793fdac68d8a5e86fbf5534bdc8f5dbf093f8ab2"} Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.716271 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-central-agent" containerID="cri-o://dec4e7e92f5454f23c4c06e7dde325f898abb232e1ee598894529e931f51bf12" gracePeriod=30 Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.716530 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.716648 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="proxy-httpd" containerID="cri-o://129f03cc7dd5ce9eaaeb5161793fdac68d8a5e86fbf5534bdc8f5dbf093f8ab2" gracePeriod=30 Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.716749 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="sg-core" containerID="cri-o://d5faeb1a613a5c8eebd6a7437055973ba171c7ef9929ddbd631522a15773dd98" gracePeriod=30 Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.716791 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" 
containerName="ceilometer-notification-agent" containerID="cri-o://7bf3ceb793765a3adbf7e614d8742cab8b27c07326767e513445beacda8adc11" gracePeriod=30 Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.724247 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7qsrr" event={"ID":"0dd652b3-9755-47c0-a4cc-c39c86d840f3","Type":"ContainerDied","Data":"156a6e3178af10fecb28c6ced39a17bf907a0448118669fde357da6404a79c93"} Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.724302 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="156a6e3178af10fecb28c6ced39a17bf907a0448118669fde357da6404a79c93" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.724385 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7qsrr" Jan 06 08:35:05 crc kubenswrapper[4784]: I0106 08:35:05.761052 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.829316111 podStartE2EDuration="47.761031873s" podCreationTimestamp="2026-01-06 08:34:18 +0000 UTC" firstStartedPulling="2026-01-06 08:34:20.456901762 +0000 UTC m=+1162.503074599" lastFinishedPulling="2026-01-06 08:35:05.388617524 +0000 UTC m=+1207.434790361" observedRunningTime="2026-01-06 08:35:05.756632886 +0000 UTC m=+1207.802805743" watchObservedRunningTime="2026-01-06 08:35:05.761031873 +0000 UTC m=+1207.807204700" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127157 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"] Jan 06 08:35:06 crc kubenswrapper[4784]: E0106 08:35:06.127599 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" containerName="barbican-db-sync" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127614 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" containerName="barbican-db-sync" Jan 06 08:35:06 crc kubenswrapper[4784]: E0106 08:35:06.127643 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="dnsmasq-dns" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127649 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="dnsmasq-dns" Jan 06 08:35:06 crc kubenswrapper[4784]: E0106 08:35:06.127667 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="init" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127673 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="init" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127837 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" containerName="barbican-db-sync" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.127867 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="977e57a8-a139-41d0-b757-2ea7134bd9ce" containerName="dnsmasq-dns" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.128782 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.143697 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.143968 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-mzt7s" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.144116 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.173673 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.194335 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.198051 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.220630 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.231187 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.270920 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.271015 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q7qr\" (UniqueName: \"kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.271051 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.271091 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.271111 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " 
pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.273462 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.282413 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.333120 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.373417 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f9f2\" (UniqueName: \"kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.373819 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q7qr\" (UniqueName: \"kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.373975 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374093 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374189 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374295 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmtxg\" (UniqueName: \"kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374448 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374574 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374693 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374848 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.374986 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.375130 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.375225 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.375349 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.375468 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.375594 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: 
\"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.377072 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.387452 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.387838 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.396959 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q7qr\" (UniqueName: \"kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.408566 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data\") pod \"barbican-worker-55f595f44f-tzkkl\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") " pod="openstack/barbican-worker-55f595f44f-tzkkl" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.412827 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.415190 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.423834 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.447669 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480425 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480522 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480573 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480618 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480655 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480688 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f9f2\" (UniqueName: \"kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480759 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480795 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480824 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmtxg\" (UniqueName: \"kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480879 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.480951 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.483144 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.484195 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.484239 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.484781 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.484818 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.485396 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.491517 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.494167 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.496436 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-55f595f44f-tzkkl"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.497353 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.505937 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmtxg\" (UniqueName: \"kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg\") pod \"barbican-keystone-listener-fd8f7c7f6-bnzsk\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") " pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.507289 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f9f2\" (UniqueName: \"kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2\") pod \"dnsmasq-dns-54c4dfcffc-cjb76\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.563306 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.582530 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.582704 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.582736 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.582799 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsckw\" (UniqueName: \"kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.582847 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt"
Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.625672 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76"
Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.685821 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.686387 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.686792 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.686899 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.687222 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsckw\" (UniqueName: \"kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.687249 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.694240 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.695786 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.700988 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc 
kubenswrapper[4784]: I0106 08:35:06.708284 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsckw\" (UniqueName: \"kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw\") pod \"barbican-api-b76bcf676-6p7dt\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764650 4784 generic.go:334] "Generic (PLEG): container finished" podID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerID="129f03cc7dd5ce9eaaeb5161793fdac68d8a5e86fbf5534bdc8f5dbf093f8ab2" exitCode=0 Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764759 4784 generic.go:334] "Generic (PLEG): container finished" podID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerID="d5faeb1a613a5c8eebd6a7437055973ba171c7ef9929ddbd631522a15773dd98" exitCode=2 Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764787 4784 generic.go:334] "Generic (PLEG): container finished" podID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerID="dec4e7e92f5454f23c4c06e7dde325f898abb232e1ee598894529e931f51bf12" exitCode=0 Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764735 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerDied","Data":"129f03cc7dd5ce9eaaeb5161793fdac68d8a5e86fbf5534bdc8f5dbf093f8ab2"} Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764948 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerDied","Data":"d5faeb1a613a5c8eebd6a7437055973ba171c7ef9929ddbd631522a15773dd98"} Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.764977 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerDied","Data":"dec4e7e92f5454f23c4c06e7dde325f898abb232e1ee598894529e931f51bf12"} Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.767744 4784 generic.go:334] "Generic (PLEG): container finished" podID="a29c392c-7fa2-4a80-b072-92b8201616b8" containerID="2d24fcba65829c8263c5636ac218b33b703ab3269bed25bdf38a8d29dd40237d" exitCode=0 Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.767788 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-57tfg" event={"ID":"a29c392c-7fa2-4a80-b072-92b8201616b8","Type":"ContainerDied","Data":"2d24fcba65829c8263c5636ac218b33b703ab3269bed25bdf38a8d29dd40237d"} Jan 06 08:35:06 crc kubenswrapper[4784]: I0106 08:35:06.791553 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.065535 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"] Jan 06 08:35:07 crc kubenswrapper[4784]: W0106 08:35:07.084785 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a5f283f_3491_4531_8213_b2c0eb6b3fc8.slice/crio-776394bc9292b8ab9260f991242f8bcb263bd670798e1d4459e43ebd4f76295c WatchSource:0}: Error finding container 776394bc9292b8ab9260f991242f8bcb263bd670798e1d4459e43ebd4f76295c: Status 404 returned error can't find the container with id 776394bc9292b8ab9260f991242f8bcb263bd670798e1d4459e43ebd4f76295c Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.201322 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.213466 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"] Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.472237 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:07 crc kubenswrapper[4784]: W0106 08:35:07.529297 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c9ed1de_b7d4_42e3_aae2_023f704b3ed8.slice/crio-3627ae2167415c8a0626bad1071a385d691c52a50e7eff48ef96de8be9858efd WatchSource:0}: Error finding container 3627ae2167415c8a0626bad1071a385d691c52a50e7eff48ef96de8be9858efd: Status 404 returned error can't find the container with id 3627ae2167415c8a0626bad1071a385d691c52a50e7eff48ef96de8be9858efd Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.788731 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerStarted","Data":"776394bc9292b8ab9260f991242f8bcb263bd670798e1d4459e43ebd4f76295c"} Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.795469 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerStarted","Data":"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd"} Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.795556 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerStarted","Data":"3627ae2167415c8a0626bad1071a385d691c52a50e7eff48ef96de8be9858efd"} Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.799036 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerStarted","Data":"3743ff4e4a25039cd231726fde85f8fc7183846905a4d555a3075282631d426a"} Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.801087 4784 generic.go:334] "Generic (PLEG): container finished" podID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerID="e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d" exitCode=0 Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.802449 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" 
event={"ID":"8b500fc9-50b8-4ca7-8a72-57b10f94427b","Type":"ContainerDied","Data":"e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d"} Jan 06 08:35:07 crc kubenswrapper[4784]: I0106 08:35:07.802485 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" event={"ID":"8b500fc9-50b8-4ca7-8a72-57b10f94427b","Type":"ContainerStarted","Data":"a3b9962858a070b3fc9675e0f60749f5873108ac83d2de3a9ee0bef52a9e8f34"} Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.121956 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-57tfg" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223440 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223647 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223735 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223755 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbrd\" (UniqueName: \"kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223877 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.223950 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data\") pod \"a29c392c-7fa2-4a80-b072-92b8201616b8\" (UID: \"a29c392c-7fa2-4a80-b072-92b8201616b8\") " Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.225302 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.233728 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts" (OuterVolumeSpecName: "scripts") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.233845 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd" (OuterVolumeSpecName: "kube-api-access-cfbrd") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). InnerVolumeSpecName "kube-api-access-cfbrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.233862 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.295504 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data" (OuterVolumeSpecName: "config-data") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327094 4784 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a29c392c-7fa2-4a80-b072-92b8201616b8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327134 4784 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327149 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327159 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327168 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbrd\" (UniqueName: \"kubernetes.io/projected/a29c392c-7fa2-4a80-b072-92b8201616b8-kube-api-access-cfbrd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.327751 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a29c392c-7fa2-4a80-b072-92b8201616b8" (UID: "a29c392c-7fa2-4a80-b072-92b8201616b8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.429796 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29c392c-7fa2-4a80-b072-92b8201616b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.824737 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerStarted","Data":"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b"} Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.824863 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.824903 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.831612 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-57tfg" event={"ID":"a29c392c-7fa2-4a80-b072-92b8201616b8","Type":"ContainerDied","Data":"b5f497f044510f42868bd80e8da919b87b61f8a7421018d7cbb9c6538fe8df3b"} Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.831666 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5f497f044510f42868bd80e8da919b87b61f8a7421018d7cbb9c6538fe8df3b" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.831759 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-57tfg" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.835469 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" event={"ID":"8b500fc9-50b8-4ca7-8a72-57b10f94427b","Type":"ContainerStarted","Data":"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115"} Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.835950 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.840964 4784 generic.go:334] "Generic (PLEG): container finished" podID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerID="7bf3ceb793765a3adbf7e614d8742cab8b27c07326767e513445beacda8adc11" exitCode=0 Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.841039 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerDied","Data":"7bf3ceb793765a3adbf7e614d8742cab8b27c07326767e513445beacda8adc11"} Jan 06 08:35:08 crc kubenswrapper[4784]: I0106 08:35:08.849359 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-b76bcf676-6p7dt" podStartSLOduration=2.849332225 podStartE2EDuration="2.849332225s" podCreationTimestamp="2026-01-06 08:35:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:08.848836999 +0000 UTC m=+1210.895009836" watchObservedRunningTime="2026-01-06 08:35:08.849332225 +0000 UTC m=+1210.895505062" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.199638 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" podStartSLOduration=3.199608184 
podStartE2EDuration="3.199608184s" podCreationTimestamp="2026-01-06 08:35:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:08.878811614 +0000 UTC m=+1210.924984491" watchObservedRunningTime="2026-01-06 08:35:09.199608184 +0000 UTC m=+1211.245781031" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.238635 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:09 crc kubenswrapper[4784]: E0106 08:35:09.239313 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" containerName="cinder-db-sync" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.239331 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" containerName="cinder-db-sync" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.239578 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" containerName="cinder-db-sync" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.241000 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.251385 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.251489 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.251649 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.256511 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-vtlzw" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.277443 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.361916 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.361989 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.362056 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnkdt\" (UniqueName: \"kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.362093 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts\") pod 
\"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.362129 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.362176 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.365259 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.436782 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.438747 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.452013 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465226 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465299 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465352 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465458 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465497 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.465566 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnkdt\" 
(UniqueName: \"kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.466955 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.495699 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.498416 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.501665 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.506041 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.522530 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnkdt\" (UniqueName: \"kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt\") pod \"cinder-scheduler-0\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569019 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569533 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569575 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcjdb\" (UniqueName: \"kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " 
pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569592 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569656 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.569679 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.590532 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671359 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671432 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671525 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671600 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671636 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcjdb\" (UniqueName: \"kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.671656 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.673516 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.673592 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.673631 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.674395 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.674440 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.687900 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.689978 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.694429 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.701010 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.708785 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcjdb\" (UniqueName: \"kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb\") pod \"dnsmasq-dns-6b4f5fc4f-8fpxg\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775442 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775510 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775565 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775600 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775625 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2sw7\" (UniqueName: \"kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775647 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.775670 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878157 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878228 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878257 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2sw7\" (UniqueName: \"kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878306 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878750 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.878770 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.879140 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.879225 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.879241 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.882421 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.894375 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.894390 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.894822 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.902600 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.915134 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2sw7\" (UniqueName: \"kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7\") pod \"cinder-api-0\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " pod="openstack/cinder-api-0" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.928752 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-84c65dd87b-gpr7l"] Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.930658 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.932703 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.934023 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 06 08:35:09 crc kubenswrapper[4784]: I0106 08:35:09.970934 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-84c65dd87b-gpr7l"] Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.050991 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.099569 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100027 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100084 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100141 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100172 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100229 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg7n5\" (UniqueName: \"kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.100256 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.122024 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.212675 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.212798 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.212892 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.212930 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.213021 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg7n5\" (UniqueName: \"kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.213063 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.213111 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.213649 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.220664 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 
crc kubenswrapper[4784]: I0106 08:35:10.227232 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.228580 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.230704 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.250515 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.256834 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg7n5\" (UniqueName: \"kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5\") pod \"barbican-api-84c65dd87b-gpr7l\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318601 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318659 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318703 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318819 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmxtv\" (UniqueName: \"kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318943 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.318979 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.319032 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts\") pod \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\" (UID: \"bf077c5c-dec8-41a6-8677-e5f7681c83e5\") " Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.329860 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.330885 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.332099 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv" (OuterVolumeSpecName: "kube-api-access-jmxtv") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "kube-api-access-jmxtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.332672 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts" (OuterVolumeSpecName: "scripts") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.375279 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.424128 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.424165 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf077c5c-dec8-41a6-8677-e5f7681c83e5-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.424173 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.424182 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.424193 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmxtv\" (UniqueName: \"kubernetes.io/projected/bf077c5c-dec8-41a6-8677-e5f7681c83e5-kube-api-access-jmxtv\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.433130 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.495584 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.527390 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.540264 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data" (OuterVolumeSpecName: "config-data") pod "bf077c5c-dec8-41a6-8677-e5f7681c83e5" (UID: "bf077c5c-dec8-41a6-8677-e5f7681c83e5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.574913 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.585820 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:35:10 crc kubenswrapper[4784]: W0106 08:35:10.596478 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3330c8ca_b383_4a37_91c9_62714dfd73f6.slice/crio-21ffcf2d7d841ea59cb668d8a65cc3feff01c504e6706313e9e662bdeecb9943 WatchSource:0}: Error finding container 21ffcf2d7d841ea59cb668d8a65cc3feff01c504e6706313e9e662bdeecb9943: Status 404 returned error can't find the container with id 21ffcf2d7d841ea59cb668d8a65cc3feff01c504e6706313e9e662bdeecb9943 Jan 06 08:35:10 crc kubenswrapper[4784]: W0106 08:35:10.605050 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a76b6a5_83ce_4ded_a5c5_6bdbd80bcfe2.slice/crio-646ef0c4094671fdb606fdbf7e7547f6bea328fb7b9b12a85f9afe2c8e3769b9 WatchSource:0}: Error finding container 646ef0c4094671fdb606fdbf7e7547f6bea328fb7b9b12a85f9afe2c8e3769b9: Status 404 returned error can't find the container with id 646ef0c4094671fdb606fdbf7e7547f6bea328fb7b9b12a85f9afe2c8e3769b9 Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.630741 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf077c5c-dec8-41a6-8677-e5f7681c83e5-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.788884 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:10 crc kubenswrapper[4784]: W0106 08:35:10.804808 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e191730_2db7_4724_909c_460cbca1569a.slice/crio-6c589551c4d8499eadf511941454ade2b2497f781885120abd540a009b2d4569 WatchSource:0}: Error finding container 6c589551c4d8499eadf511941454ade2b2497f781885120abd540a009b2d4569: Status 404 returned error can't find the container with id 6c589551c4d8499eadf511941454ade2b2497f781885120abd540a009b2d4569 Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.880165 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" event={"ID":"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2","Type":"ContainerStarted","Data":"646ef0c4094671fdb606fdbf7e7547f6bea328fb7b9b12a85f9afe2c8e3769b9"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.886262 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerStarted","Data":"21ffcf2d7d841ea59cb668d8a65cc3feff01c504e6706313e9e662bdeecb9943"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.889843 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerStarted","Data":"6c589551c4d8499eadf511941454ade2b2497f781885120abd540a009b2d4569"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.892840 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" 
event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerStarted","Data":"e8dea317ca214f9ea6144a057e6bb7ef38cd17e2a3566ae30882d733e82bb07b"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.893132 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerStarted","Data":"18aae47178054e0d5a91d219ad4b268e6837bc33b481c8e49e5de6f2ab09b27f"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.900185 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerStarted","Data":"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.900234 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerStarted","Data":"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.909521 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="dnsmasq-dns" containerID="cri-o://f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115" gracePeriod=10 Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.910013 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.912651 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf077c5c-dec8-41a6-8677-e5f7681c83e5","Type":"ContainerDied","Data":"61db0b6eed5e929819192ee3637fbe416f0c424c74af61db96a5177f602cc337"} Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.912712 4784 scope.go:117] "RemoveContainer" containerID="129f03cc7dd5ce9eaaeb5161793fdac68d8a5e86fbf5534bdc8f5dbf093f8ab2" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.920015 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" podStartSLOduration=2.138407933 podStartE2EDuration="4.919990342s" podCreationTimestamp="2026-01-06 08:35:06 +0000 UTC" firstStartedPulling="2026-01-06 08:35:07.229741098 +0000 UTC m=+1209.275913935" lastFinishedPulling="2026-01-06 08:35:10.011323507 +0000 UTC m=+1212.057496344" observedRunningTime="2026-01-06 08:35:10.914745559 +0000 UTC m=+1212.960918396" watchObservedRunningTime="2026-01-06 08:35:10.919990342 +0000 UTC m=+1212.966163179" Jan 06 08:35:10 crc kubenswrapper[4784]: I0106 08:35:10.969588 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-55f595f44f-tzkkl" podStartSLOduration=2.07318315 podStartE2EDuration="4.969566319s" podCreationTimestamp="2026-01-06 08:35:06 +0000 UTC" firstStartedPulling="2026-01-06 08:35:07.091608851 +0000 UTC m=+1209.137781688" lastFinishedPulling="2026-01-06 08:35:09.98799202 +0000 UTC m=+1212.034164857" observedRunningTime="2026-01-06 08:35:10.961666772 +0000 UTC m=+1213.007839609" watchObservedRunningTime="2026-01-06 08:35:10.969566319 +0000 UTC m=+1213.015739156" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.021515 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/barbican-api-84c65dd87b-gpr7l"] Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.051996 4784 scope.go:117] "RemoveContainer" containerID="d5faeb1a613a5c8eebd6a7437055973ba171c7ef9929ddbd631522a15773dd98" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.057087 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.118273 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.158565 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:11 crc kubenswrapper[4784]: E0106 08:35:11.159255 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-notification-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159275 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-notification-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: E0106 08:35:11.159292 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="sg-core" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159299 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="sg-core" Jan 06 08:35:11 crc kubenswrapper[4784]: E0106 08:35:11.159326 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="proxy-httpd" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159339 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="proxy-httpd" Jan 06 08:35:11 crc kubenswrapper[4784]: E0106 08:35:11.159358 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-central-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159365 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-central-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159588 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-notification-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159619 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="sg-core" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159633 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="ceilometer-central-agent" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.159641 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" containerName="proxy-httpd" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.162168 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.165345 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.165508 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.174567 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.235293 4784 scope.go:117] "RemoveContainer" containerID="7bf3ceb793765a3adbf7e614d8742cab8b27c07326767e513445beacda8adc11" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.275709 4784 scope.go:117] "RemoveContainer" containerID="dec4e7e92f5454f23c4c06e7dde325f898abb232e1ee598894529e931f51bf12" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353478 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353562 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353598 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353633 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353725 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.353748 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.354055 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47r2t\" (UniqueName: \"kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 
08:35:11.456358 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.457391 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.457777 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.457853 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.457885 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.458346 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.458378 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.458477 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47r2t\" (UniqueName: \"kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.458654 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.477574 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.477828 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.479175 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.480002 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47r2t\" (UniqueName: \"kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.481241 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts\") pod \"ceilometer-0\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.502537 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.596932 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.765655 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.766096 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.766161 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f9f2\" (UniqueName: \"kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.766238 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.766270 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.766380 4784 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config\") pod \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\" (UID: \"8b500fc9-50b8-4ca7-8a72-57b10f94427b\") " Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.772286 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2" (OuterVolumeSpecName: "kube-api-access-4f9f2") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "kube-api-access-4f9f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.844242 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.846032 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config" (OuterVolumeSpecName: "config") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.863220 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.869579 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.869611 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.869622 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.869630 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f9f2\" (UniqueName: \"kubernetes.io/projected/8b500fc9-50b8-4ca7-8a72-57b10f94427b-kube-api-access-4f9f2\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.870772 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.909632 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8b500fc9-50b8-4ca7-8a72-57b10f94427b" (UID: "8b500fc9-50b8-4ca7-8a72-57b10f94427b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.927083 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerStarted","Data":"fb3044be153df30f3ff3fd00d5cb70d02783bb32a511ab902bb2119d0727fe42"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.927151 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerStarted","Data":"88b494cb7dd981f0cd85a374cc0ff0b0daec7659dd58f2b8d3b6341cb598c581"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.930322 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerStarted","Data":"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.933032 4784 generic.go:334] "Generic (PLEG): container finished" podID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerID="f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115" exitCode=0 Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.933091 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" event={"ID":"8b500fc9-50b8-4ca7-8a72-57b10f94427b","Type":"ContainerDied","Data":"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.933112 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" event={"ID":"8b500fc9-50b8-4ca7-8a72-57b10f94427b","Type":"ContainerDied","Data":"a3b9962858a070b3fc9675e0f60749f5873108ac83d2de3a9ee0bef52a9e8f34"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.933131 4784 scope.go:117] "RemoveContainer" containerID="f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.933254 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-cjb76" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.953527 4784 generic.go:334] "Generic (PLEG): container finished" podID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerID="0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f" exitCode=0 Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.953610 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" event={"ID":"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2","Type":"ContainerDied","Data":"0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f"} Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.975890 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:11 crc kubenswrapper[4784]: I0106 08:35:11.975927 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b500fc9-50b8-4ca7-8a72-57b10f94427b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.070913 4784 scope.go:117] "RemoveContainer" containerID="e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.093735 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.118359 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-cjb76"] Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.122595 4784 scope.go:117] "RemoveContainer" containerID="f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115" Jan 06 08:35:12 crc kubenswrapper[4784]: E0106 08:35:12.125995 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115\": container with ID starting with f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115 not found: ID does not exist" containerID="f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.126056 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115"} err="failed to get container status \"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115\": rpc error: code = NotFound desc = could not find container \"f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115\": container with ID starting with f97fafbcd3cc0337444b64a25f84fab0fd9df9355454662dae0f96b11b0ce115 not found: ID does not exist" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.126088 4784 scope.go:117] "RemoveContainer" containerID="e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d" Jan 06 08:35:12 crc kubenswrapper[4784]: E0106 08:35:12.126752 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d\": container with ID starting with e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d not found: ID does not exist" 
containerID="e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.126783 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d"} err="failed to get container status \"e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d\": rpc error: code = NotFound desc = could not find container \"e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d\": container with ID starting with e107aaeb2a2a1a784a3d9f61f6a770680bf2d6a2e2f9bfebc140613ae2a1359d not found: ID does not exist" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.143400 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.332372 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" path="/var/lib/kubelet/pods/8b500fc9-50b8-4ca7-8a72-57b10f94427b/volumes" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.333868 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf077c5c-dec8-41a6-8677-e5f7681c83e5" path="/var/lib/kubelet/pods/bf077c5c-dec8-41a6-8677-e5f7681c83e5/volumes" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.847864 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.987215 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerStarted","Data":"6fffca0776ab46872f3102c7fc0a42be2362bb53a3780f33a08136d43b9b8eae"} Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.987817 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.987836 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:12 crc kubenswrapper[4784]: I0106 08:35:12.997966 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerStarted","Data":"ef5f267b643f94c633862d8c654534de8df6ec2d093ffa0ca67b818724411b62"} Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.003993 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerStarted","Data":"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503"} Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.005255 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.013768 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerStarted","Data":"cf07473f89febf2955db2be9cdf9b8091d10f25a62d991dcebf4b454e9795a49"} Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.030174 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-84c65dd87b-gpr7l" podStartSLOduration=4.030153534 podStartE2EDuration="4.030153534s" podCreationTimestamp="2026-01-06 08:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:13.024932551 +0000 UTC m=+1215.071105388" watchObservedRunningTime="2026-01-06 08:35:13.030153534 +0000 UTC m=+1215.076326371" Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.030739 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" event={"ID":"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2","Type":"ContainerStarted","Data":"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796"} Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.031410 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.062432 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.062398498 podStartE2EDuration="4.062398498s" podCreationTimestamp="2026-01-06 08:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:13.057354932 +0000 UTC m=+1215.103527769" watchObservedRunningTime="2026-01-06 08:35:13.062398498 +0000 UTC m=+1215.108571335" Jan 06 08:35:13 crc kubenswrapper[4784]: I0106 08:35:13.089639 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" podStartSLOduration=4.089619128 podStartE2EDuration="4.089619128s" podCreationTimestamp="2026-01-06 08:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:13.088380339 +0000 UTC m=+1215.134553176" watchObservedRunningTime="2026-01-06 08:35:13.089619128 +0000 UTC m=+1215.135791965" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.084452 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerStarted","Data":"1279849ab069a3461cdc6028c4f6c0dba4c4f0ff761e3f67305a9a23324eb11e"} Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.091133 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api-log" containerID="cri-o://c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" gracePeriod=30 Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.092063 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerStarted","Data":"633bbb3d795fd90d6b558760f6c67721d632afb6f90363aa001a5f84fc707951"} Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.093665 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api" containerID="cri-o://c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" gracePeriod=30 Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.119804 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.094615024 podStartE2EDuration="5.119779701s" podCreationTimestamp="2026-01-06 08:35:09 +0000 UTC" firstStartedPulling="2026-01-06 08:35:10.605849581 +0000 UTC m=+1212.652022418" lastFinishedPulling="2026-01-06 08:35:11.631014258 +0000 
UTC m=+1213.677187095" observedRunningTime="2026-01-06 08:35:14.114802225 +0000 UTC m=+1216.160975062" watchObservedRunningTime="2026-01-06 08:35:14.119779701 +0000 UTC m=+1216.165952538" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.350772 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.351275 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.351333 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.354161 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.354218 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd" gracePeriod=600 Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.591971 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 06 08:35:14 crc kubenswrapper[4784]: I0106 08:35:14.961090 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073282 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073352 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073401 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073536 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073730 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073767 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.073895 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2sw7\" (UniqueName: \"kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7\") pod \"8e191730-2db7-4724-909c-460cbca1569a\" (UID: \"8e191730-2db7-4724-909c-460cbca1569a\") " Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.080651 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.080926 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs" (OuterVolumeSpecName: "logs") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.086790 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.096519 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7" (OuterVolumeSpecName: "kube-api-access-z2sw7") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "kube-api-access-z2sw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.096881 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts" (OuterVolumeSpecName: "scripts") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.121916 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122195 4784 generic.go:334] "Generic (PLEG): container finished" podID="8e191730-2db7-4724-909c-460cbca1569a" containerID="c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" exitCode=0 Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122336 4784 generic.go:334] "Generic (PLEG): container finished" podID="8e191730-2db7-4724-909c-460cbca1569a" containerID="c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" exitCode=143 Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122256 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122277 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerDied","Data":"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503"} Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122537 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerDied","Data":"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966"} Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122590 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8e191730-2db7-4724-909c-460cbca1569a","Type":"ContainerDied","Data":"6c589551c4d8499eadf511941454ade2b2497f781885120abd540a009b2d4569"} Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.122619 4784 scope.go:117] "RemoveContainer" containerID="c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.128061 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerStarted","Data":"62023330e700a944c57ae8d989939ab313b16d0c7816f13cf9fcafa3226cb8ea"} Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.134461 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd" exitCode=0 Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.135566 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd"} Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.162152 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data" (OuterVolumeSpecName: "config-data") pod "8e191730-2db7-4724-909c-460cbca1569a" (UID: "8e191730-2db7-4724-909c-460cbca1569a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177525 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e191730-2db7-4724-909c-460cbca1569a-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177629 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177646 4784 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8e191730-2db7-4724-909c-460cbca1569a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177660 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2sw7\" (UniqueName: \"kubernetes.io/projected/8e191730-2db7-4724-909c-460cbca1569a-kube-api-access-z2sw7\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177674 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177685 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.177700 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e191730-2db7-4724-909c-460cbca1569a-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.235485 4784 scope.go:117] "RemoveContainer" containerID="c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.284431 4784 scope.go:117] "RemoveContainer" containerID="c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 08:35:15.285016 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503\": container with ID starting with c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503 not found: ID does not exist" containerID="c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.285603 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503"} err="failed to get container status \"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503\": rpc error: code = NotFound desc = could not find container \"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503\": container with ID starting with c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503 not found: ID does not exist" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.285651 4784 scope.go:117] "RemoveContainer" containerID="c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 
08:35:15.286283 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966\": container with ID starting with c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966 not found: ID does not exist" containerID="c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.286313 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966"} err="failed to get container status \"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966\": rpc error: code = NotFound desc = could not find container \"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966\": container with ID starting with c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966 not found: ID does not exist" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.286327 4784 scope.go:117] "RemoveContainer" containerID="c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.286807 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503"} err="failed to get container status \"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503\": rpc error: code = NotFound desc = could not find container \"c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503\": container with ID starting with c778b4e96bfa6758bca49ba60636d2164204ff630c7330b338bd5e11cb205503 not found: ID does not exist" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.286852 4784 scope.go:117] "RemoveContainer" containerID="c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.287169 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966"} err="failed to get container status \"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966\": rpc error: code = NotFound desc = could not find container \"c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966\": container with ID starting with c9be7ebeda2f740f14e261d449cc183a35d989a3319be25e94fb9b67a057b966 not found: ID does not exist" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.287192 4784 scope.go:117] "RemoveContainer" containerID="9cd4e5f96c6907f66a8c281dacda866138cb3ec7ef90bd2f8123d427c09cf064" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.524590 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.542195 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.557072 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 08:35:15.557786 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.557814 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e191730-2db7-4724-909c-460cbca1569a" 
containerName="cinder-api" Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 08:35:15.557842 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="init" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.557851 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="init" Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 08:35:15.557877 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api-log" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.557887 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api-log" Jan 06 08:35:15 crc kubenswrapper[4784]: E0106 08:35:15.557914 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="dnsmasq-dns" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.557923 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="dnsmasq-dns" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.558190 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api-log" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.558232 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b500fc9-50b8-4ca7-8a72-57b10f94427b" containerName="dnsmasq-dns" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.558247 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e191730-2db7-4724-909c-460cbca1569a" containerName="cinder-api" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.562034 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.565186 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.565575 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.567832 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.567839 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.699313 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.700738 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.700775 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.700936 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52bvq\" (UniqueName: \"kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.700967 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.701127 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.701194 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.701253 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.701297 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.803971 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804074 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804106 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804179 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52bvq\" (UniqueName: \"kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804203 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804227 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804245 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804272 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804290 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.804930 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.805005 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.811143 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.811583 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.812914 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.813711 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.822780 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.829494 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.833500 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52bvq\" (UniqueName: \"kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq\") pod \"cinder-api-0\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " pod="openstack/cinder-api-0" Jan 06 08:35:15 crc kubenswrapper[4784]: I0106 08:35:15.889822 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:35:16 crc kubenswrapper[4784]: I0106 08:35:16.149838 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919"} Jan 06 08:35:16 crc kubenswrapper[4784]: I0106 08:35:16.157241 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerStarted","Data":"e7ed9a9b7deb5129aee76284c1c87b49885fd897012779cc8a4098572d8ccf83"} Jan 06 08:35:16 crc kubenswrapper[4784]: I0106 08:35:16.327243 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e191730-2db7-4724-909c-460cbca1569a" path="/var/lib/kubelet/pods/8e191730-2db7-4724-909c-460cbca1569a/volumes" Jan 06 08:35:16 crc kubenswrapper[4784]: I0106 08:35:16.396045 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:35:16 crc kubenswrapper[4784]: I0106 08:35:16.710842 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:35:17 crc kubenswrapper[4784]: I0106 08:35:17.177289 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerStarted","Data":"308fa13267e4be01707682d19351f9c98d583de145ec312e6ae995eb800699ba"} Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.191775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerStarted","Data":"9f1c3888df0343f9b1d8b44706fe2ef8ae987e71456454a609fae003c73320e1"} Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.192394 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerStarted","Data":"952523238c0159b22bae45fd0feb984398f4e41c261b86b0227306adbbc37885"} Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.192616 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.199788 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerStarted","Data":"6da0629a64f51120a10ec58ba38aa00af272b821093ed0733990d728e7347bae"} Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.200203 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.226640 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.226615794 podStartE2EDuration="3.226615794s" podCreationTimestamp="2026-01-06 08:35:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:18.217294873 +0000 UTC m=+1220.263467710" watchObservedRunningTime="2026-01-06 08:35:18.226615794 +0000 UTC m=+1220.272788631" Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.246570 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.512931633 
podStartE2EDuration="7.246517184s" podCreationTimestamp="2026-01-06 08:35:11 +0000 UTC" firstStartedPulling="2026-01-06 08:35:12.141880293 +0000 UTC m=+1214.188053130" lastFinishedPulling="2026-01-06 08:35:16.875465844 +0000 UTC m=+1218.921638681" observedRunningTime="2026-01-06 08:35:18.242950193 +0000 UTC m=+1220.289123030" watchObservedRunningTime="2026-01-06 08:35:18.246517184 +0000 UTC m=+1220.292690021" Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.611077 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:18 crc kubenswrapper[4784]: I0106 08:35:18.665279 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.248360 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.357676 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.357973 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-646fd5ff66-md6q9" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-api" containerID="cri-o://71de650cad0dde73291bd080b9ece2d17f9c3a722f41f857e748759314ce259a" gracePeriod=30 Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.358594 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-646fd5ff66-md6q9" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-httpd" containerID="cri-o://5c0f8bf209994f84889c0f824711560eeddf5feb2740b5e35a651ac009c166d4" gracePeriod=30 Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.905885 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.989158 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:35:19 crc kubenswrapper[4784]: I0106 08:35:19.989457 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="dnsmasq-dns" containerID="cri-o://8472cd9de42ff397eff4358dec557a1905e937e60b1dbc897a83a21ef79ce87b" gracePeriod=10 Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.095374 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.206707 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.251410 4784 generic.go:334] "Generic (PLEG): container finished" podID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerID="5c0f8bf209994f84889c0f824711560eeddf5feb2740b5e35a651ac009c166d4" exitCode=0 Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.251480 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerDied","Data":"5c0f8bf209994f84889c0f824711560eeddf5feb2740b5e35a651ac009c166d4"} Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.284816 4784 generic.go:334] "Generic (PLEG): container finished" 
podID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerID="8472cd9de42ff397eff4358dec557a1905e937e60b1dbc897a83a21ef79ce87b" exitCode=0 Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.285072 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="cinder-scheduler" containerID="cri-o://ef5f267b643f94c633862d8c654534de8df6ec2d093ffa0ca67b818724411b62" gracePeriod=30 Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.285177 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" event={"ID":"cb378c55-6158-463a-8c26-eaef2c173cc5","Type":"ContainerDied","Data":"8472cd9de42ff397eff4358dec557a1905e937e60b1dbc897a83a21ef79ce87b"} Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.285381 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="probe" containerID="cri-o://633bbb3d795fd90d6b558760f6c67721d632afb6f90363aa001a5f84fc707951" gracePeriod=30 Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.362282 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.364931 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.756828 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869150 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvkgc\" (UniqueName: \"kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869283 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869408 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869493 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869609 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.869633 4784 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb\") pod \"cb378c55-6158-463a-8c26-eaef2c173cc5\" (UID: \"cb378c55-6158-463a-8c26-eaef2c173cc5\") " Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.896906 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc" (OuterVolumeSpecName: "kube-api-access-wvkgc") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "kube-api-access-wvkgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.957768 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.967782 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config" (OuterVolumeSpecName: "config") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.972252 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.972277 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.972289 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvkgc\" (UniqueName: \"kubernetes.io/projected/cb378c55-6158-463a-8c26-eaef2c173cc5-kube-api-access-wvkgc\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.977015 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.989734 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:20 crc kubenswrapper[4784]: I0106 08:35:20.995096 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb378c55-6158-463a-8c26-eaef2c173cc5" (UID: "cb378c55-6158-463a-8c26-eaef2c173cc5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.073996 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.074052 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.074063 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb378c55-6158-463a-8c26-eaef2c173cc5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.298437 4784 generic.go:334] "Generic (PLEG): container finished" podID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerID="633bbb3d795fd90d6b558760f6c67721d632afb6f90363aa001a5f84fc707951" exitCode=0 Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.298526 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerDied","Data":"633bbb3d795fd90d6b558760f6c67721d632afb6f90363aa001a5f84fc707951"} Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.300827 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.300828 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-cfl2q" event={"ID":"cb378c55-6158-463a-8c26-eaef2c173cc5","Type":"ContainerDied","Data":"14b9b5e07b81973e1a3bc1a44f9f66a2da8de46a3b65f94352d7252b1ff9264a"} Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.300944 4784 scope.go:117] "RemoveContainer" containerID="8472cd9de42ff397eff4358dec557a1905e937e60b1dbc897a83a21ef79ce87b" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.336332 4784 scope.go:117] "RemoveContainer" containerID="9e938fe53de519b5ef7c9e8139bb950085c933cd19f796332bd04026db444dd6" Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.343746 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:35:21 crc kubenswrapper[4784]: I0106 08:35:21.357249 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-cfl2q"] Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.326612 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" path="/var/lib/kubelet/pods/cb378c55-6158-463a-8c26-eaef2c173cc5/volumes" Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.338113 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.427731 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.504802 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.505080 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-b76bcf676-6p7dt" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api-log" containerID="cri-o://9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd" gracePeriod=30 Jan 06 08:35:22 crc kubenswrapper[4784]: I0106 08:35:22.505573 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-b76bcf676-6p7dt" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api" containerID="cri-o://b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b" gracePeriod=30 Jan 06 08:35:23 crc kubenswrapper[4784]: I0106 08:35:23.322917 4784 generic.go:334] "Generic (PLEG): container finished" podID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerID="9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd" exitCode=143 Jan 06 08:35:23 crc kubenswrapper[4784]: I0106 08:35:23.323008 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerDied","Data":"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd"} Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.337170 4784 generic.go:334] "Generic (PLEG): container finished" podID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerID="ef5f267b643f94c633862d8c654534de8df6ec2d093ffa0ca67b818724411b62" exitCode=0 Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.337226 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerDied","Data":"ef5f267b643f94c633862d8c654534de8df6ec2d093ffa0ca67b818724411b62"} Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.459284 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558098 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnkdt\" (UniqueName: \"kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558221 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558285 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558434 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558489 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558617 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data\") pod \"3330c8ca-b383-4a37-91c9-62714dfd73f6\" (UID: \"3330c8ca-b383-4a37-91c9-62714dfd73f6\") " Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.558892 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.559220 4784 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3330c8ca-b383-4a37-91c9-62714dfd73f6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.565759 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts" (OuterVolumeSpecName: "scripts") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.578206 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.578311 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt" (OuterVolumeSpecName: "kube-api-access-gnkdt") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "kube-api-access-gnkdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.613908 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.662165 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnkdt\" (UniqueName: \"kubernetes.io/projected/3330c8ca-b383-4a37-91c9-62714dfd73f6-kube-api-access-gnkdt\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.662196 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.662206 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.662217 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.663657 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data" (OuterVolumeSpecName: "config-data") pod "3330c8ca-b383-4a37-91c9-62714dfd73f6" (UID: "3330c8ca-b383-4a37-91c9-62714dfd73f6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:24 crc kubenswrapper[4784]: I0106 08:35:24.764129 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3330c8ca-b383-4a37-91c9-62714dfd73f6-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.350885 4784 generic.go:334] "Generic (PLEG): container finished" podID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerID="71de650cad0dde73291bd080b9ece2d17f9c3a722f41f857e748759314ce259a" exitCode=0 Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.351332 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerDied","Data":"71de650cad0dde73291bd080b9ece2d17f9c3a722f41f857e748759314ce259a"} Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.352933 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3330c8ca-b383-4a37-91c9-62714dfd73f6","Type":"ContainerDied","Data":"21ffcf2d7d841ea59cb668d8a65cc3feff01c504e6706313e9e662bdeecb9943"} Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.352963 4784 scope.go:117] "RemoveContainer" containerID="633bbb3d795fd90d6b558760f6c67721d632afb6f90363aa001a5f84fc707951" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.353122 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.426953 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.431200 4784 scope.go:117] "RemoveContainer" containerID="ef5f267b643f94c633862d8c654534de8df6ec2d093ffa0ca67b818724411b62" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.440237 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.448759 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:25 crc kubenswrapper[4784]: E0106 08:35:25.449282 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="probe" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449304 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="probe" Jan 06 08:35:25 crc kubenswrapper[4784]: E0106 08:35:25.449362 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="cinder-scheduler" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449370 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="cinder-scheduler" Jan 06 08:35:25 crc kubenswrapper[4784]: E0106 08:35:25.449379 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="init" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449386 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="init" Jan 06 08:35:25 crc kubenswrapper[4784]: E0106 08:35:25.449397 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="dnsmasq-dns" Jan 06 08:35:25 crc 
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449403 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="dnsmasq-dns"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449601 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb378c55-6158-463a-8c26-eaef2c173cc5" containerName="dnsmasq-dns"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449622 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="cinder-scheduler"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.449636 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" containerName="probe"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.450789 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.457278 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.463224 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.484173 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.484261 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm6ht\" (UniqueName: \"kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.484282 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.484522 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.484609 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
" pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.517086 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.586945 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle\") pod \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587141 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config\") pod \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587300 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr6p2\" (UniqueName: \"kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2\") pod \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587322 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs\") pod \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587353 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config\") pod \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\" (UID: \"98f474cd-143e-4f7b-8269-eca3e3c1b6b0\") " Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587610 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587720 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587749 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587790 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587848 4784 
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587848 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.587884 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm6ht\" (UniqueName: \"kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.591060 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.596042 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.597958 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.598075 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "98f474cd-143e-4f7b-8269-eca3e3c1b6b0" (UID: "98f474cd-143e-4f7b-8269-eca3e3c1b6b0"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.604773 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0"
Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.609532 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2" (OuterVolumeSpecName: "kube-api-access-xr6p2") pod "98f474cd-143e-4f7b-8269-eca3e3c1b6b0" (UID: "98f474cd-143e-4f7b-8269-eca3e3c1b6b0"). InnerVolumeSpecName "kube-api-access-xr6p2". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.610391 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.627244 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm6ht\" (UniqueName: \"kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht\") pod \"cinder-scheduler-0\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " pod="openstack/cinder-scheduler-0" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.671145 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config" (OuterVolumeSpecName: "config") pod "98f474cd-143e-4f7b-8269-eca3e3c1b6b0" (UID: "98f474cd-143e-4f7b-8269-eca3e3c1b6b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.680714 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98f474cd-143e-4f7b-8269-eca3e3c1b6b0" (UID: "98f474cd-143e-4f7b-8269-eca3e3c1b6b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.690306 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr6p2\" (UniqueName: \"kubernetes.io/projected/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-kube-api-access-xr6p2\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.690347 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.690359 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.690369 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.717738 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "98f474cd-143e-4f7b-8269-eca3e3c1b6b0" (UID: "98f474cd-143e-4f7b-8269-eca3e3c1b6b0"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.792724 4784 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98f474cd-143e-4f7b-8269-eca3e3c1b6b0-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:25 crc kubenswrapper[4784]: I0106 08:35:25.829101 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.351212 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3330c8ca-b383-4a37-91c9-62714dfd73f6" path="/var/lib/kubelet/pods/3330c8ca-b383-4a37-91c9-62714dfd73f6/volumes" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.379315 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.387832 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-646fd5ff66-md6q9" event={"ID":"98f474cd-143e-4f7b-8269-eca3e3c1b6b0","Type":"ContainerDied","Data":"ee514d52ca3bcc001361d3fb34dab619eb21ccd57fb8b3f0226a1fa0d5e3a0af"} Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.387898 4784 scope.go:117] "RemoveContainer" containerID="5c0f8bf209994f84889c0f824711560eeddf5feb2740b5e35a651ac009c166d4" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.388027 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-646fd5ff66-md6q9" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.400965 4784 generic.go:334] "Generic (PLEG): container finished" podID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerID="b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b" exitCode=0 Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.401013 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerDied","Data":"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b"} Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.401047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-b76bcf676-6p7dt" event={"ID":"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8","Type":"ContainerDied","Data":"3627ae2167415c8a0626bad1071a385d691c52a50e7eff48ef96de8be9858efd"} Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.401110 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-b76bcf676-6p7dt" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.412664 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs\") pod \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.412771 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom\") pod \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.412807 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsckw\" (UniqueName: \"kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw\") pod \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.412832 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data\") pod \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.412875 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle\") pod \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\" (UID: \"1c9ed1de-b7d4-42e3-aae2-023f704b3ed8\") " Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.414947 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs" (OuterVolumeSpecName: "logs") pod "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" (UID: "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.426805 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw" (OuterVolumeSpecName: "kube-api-access-tsckw") pod "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" (UID: "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8"). InnerVolumeSpecName "kube-api-access-tsckw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.462822 4784 scope.go:117] "RemoveContainer" containerID="71de650cad0dde73291bd080b9ece2d17f9c3a722f41f857e748759314ce259a" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.467205 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" (UID: "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.516952 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.516991 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.517002 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsckw\" (UniqueName: \"kubernetes.io/projected/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-kube-api-access-tsckw\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.542040 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.543728 4784 scope.go:117] "RemoveContainer" containerID="b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.578148 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" (UID: "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.586647 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data" (OuterVolumeSpecName: "config-data") pod "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" (UID: "1c9ed1de-b7d4-42e3-aae2-023f704b3ed8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.602611 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.605352 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-646fd5ff66-md6q9"] Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.609926 4784 scope.go:117] "RemoveContainer" containerID="9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.618950 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.618986 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.644104 4784 scope.go:117] "RemoveContainer" containerID="b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b" Jan 06 08:35:26 crc kubenswrapper[4784]: E0106 08:35:26.644956 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b\": container with ID starting with b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b not found: ID does not exist" containerID="b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.644996 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b"} err="failed to get container status \"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b\": rpc error: code = NotFound desc = could not find container \"b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b\": container with ID starting with b30ca686fe1b2cd1924ba1613fd13d7f27b3cf8349fcdf5ba3d28e9bc462213b not found: ID does not exist" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.645022 4784 scope.go:117] "RemoveContainer" containerID="9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd" Jan 06 08:35:26 crc kubenswrapper[4784]: E0106 08:35:26.645947 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd\": container with ID starting with 9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd not found: ID does not exist" containerID="9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.645979 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd"} err="failed to get container status \"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd\": rpc error: code = NotFound desc = could not find container \"9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd\": container with ID starting with 9ce5d0551db59c71071e6ebc7c18e8c4f0493f3e5840e492125d1c819ee210cd not found: ID does not 
exist" Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.773727 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:26 crc kubenswrapper[4784]: I0106 08:35:26.792532 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-b76bcf676-6p7dt"] Jan 06 08:35:27 crc kubenswrapper[4784]: I0106 08:35:27.442198 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerStarted","Data":"5d81264728e21f0de5897f691f16a0a600c9fc6f290f4b6c5b4cf42420927001"} Jan 06 08:35:27 crc kubenswrapper[4784]: I0106 08:35:27.442645 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerStarted","Data":"4de6166ea7bf2f0199cb7a4fddb9849d8c440f2ecf852e061c868b05bca6d4e1"} Jan 06 08:35:27 crc kubenswrapper[4784]: I0106 08:35:27.827630 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:35:28 crc kubenswrapper[4784]: I0106 08:35:28.325136 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" path="/var/lib/kubelet/pods/1c9ed1de-b7d4-42e3-aae2-023f704b3ed8/volumes" Jan 06 08:35:28 crc kubenswrapper[4784]: I0106 08:35:28.326240 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" path="/var/lib/kubelet/pods/98f474cd-143e-4f7b-8269-eca3e3c1b6b0/volumes" Jan 06 08:35:28 crc kubenswrapper[4784]: I0106 08:35:28.410239 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 06 08:35:28 crc kubenswrapper[4784]: I0106 08:35:28.467193 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerStarted","Data":"254f5436d55af633a01d076f3a43e18f1370e7e15307f5c1d0c79c703303c8f9"} Jan 06 08:35:28 crc kubenswrapper[4784]: I0106 08:35:28.526492 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.526468172 podStartE2EDuration="3.526468172s" podCreationTimestamp="2026-01-06 08:35:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:28.521105895 +0000 UTC m=+1230.567278732" watchObservedRunningTime="2026-01-06 08:35:28.526468172 +0000 UTC m=+1230.572640999" Jan 06 08:35:30 crc kubenswrapper[4784]: I0106 08:35:30.830173 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.960031 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 06 08:35:31 crc kubenswrapper[4784]: E0106 08:35:31.960997 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961015 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api" Jan 06 08:35:31 crc kubenswrapper[4784]: E0106 08:35:31.961034 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" 
containerName="neutron-api" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961042 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-api" Jan 06 08:35:31 crc kubenswrapper[4784]: E0106 08:35:31.961073 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-httpd" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961081 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-httpd" Jan 06 08:35:31 crc kubenswrapper[4784]: E0106 08:35:31.961116 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api-log" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961125 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api-log" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961359 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-api" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961375 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961396 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9ed1de-b7d4-42e3-aae2-023f704b3ed8" containerName="barbican-api-log" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.961410 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="98f474cd-143e-4f7b-8269-eca3e3c1b6b0" containerName="neutron-httpd" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.962219 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.965118 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-wfbf5" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.965289 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.965933 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 06 08:35:31 crc kubenswrapper[4784]: I0106 08:35:31.978331 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.081454 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.081530 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.081659 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t6d2\" (UniqueName: \"kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.081785 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.183794 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.183892 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t6d2\" (UniqueName: \"kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.183951 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.184006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.185315 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.192705 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.197813 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.210439 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t6d2\" (UniqueName: \"kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2\") pod \"openstackclient\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") " pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.284315 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 06 08:35:32 crc kubenswrapper[4784]: I0106 08:35:32.873319 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 06 08:35:32 crc kubenswrapper[4784]: W0106 08:35:32.908941 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1dc9219_aca3_47c5_b8f7_37799235c2a9.slice/crio-b5d69520c8a7958d79a2317bef98d2209898acd8408348a29c8218a2e926b2fb WatchSource:0}: Error finding container b5d69520c8a7958d79a2317bef98d2209898acd8408348a29c8218a2e926b2fb: Status 404 returned error can't find the container with id b5d69520c8a7958d79a2317bef98d2209898acd8408348a29c8218a2e926b2fb Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.523304 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d1dc9219-aca3-47c5-b8f7-37799235c2a9","Type":"ContainerStarted","Data":"b5d69520c8a7958d79a2317bef98d2209898acd8408348a29c8218a2e926b2fb"} Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.911068 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"] Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.913415 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.919215 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.919536 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.919748 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 06 08:35:33 crc kubenswrapper[4784]: I0106 08:35:33.947429 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"] Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.042902 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043012 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043051 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55xrm\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043085 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043118 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043201 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043301 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " 
pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.043361 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145048 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145125 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55xrm\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145163 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145198 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145220 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145301 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145332 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.145352 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc 
kubenswrapper[4784]: I0106 08:35:34.145625 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.146585 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.154018 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.154068 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.155164 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.155488 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.159318 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.164491 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55xrm\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm\") pod \"swift-proxy-ddd99f6b5-9vfkd\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") " pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.246688 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:34 crc kubenswrapper[4784]: I0106 08:35:34.886908 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"] Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.549836 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerStarted","Data":"bc9bbb81f969e4ccb4a50e67c4b725fd7aaad864e06456377517ca86818bce99"} Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.550334 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerStarted","Data":"8885e8dca6c2747b87500b9c0f62e03258c8d67a971af2cba425f6a3b53371bf"} Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.550354 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerStarted","Data":"4a006b1e8f0548fd4e4398d39d25d3b707e9b4348252c4a228e450fc2f8e8164"} Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.550399 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.550424 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:35 crc kubenswrapper[4784]: I0106 08:35:35.576747 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" podStartSLOduration=2.576718875 podStartE2EDuration="2.576718875s" podCreationTimestamp="2026-01-06 08:35:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:35:35.572827195 +0000 UTC m=+1237.619000052" watchObservedRunningTime="2026-01-06 08:35:35.576718875 +0000 UTC m=+1237.622891752" Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.023933 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.024266 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-central-agent" containerID="cri-o://1279849ab069a3461cdc6028c4f6c0dba4c4f0ff761e3f67305a9a23324eb11e" gracePeriod=30 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.025351 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="proxy-httpd" containerID="cri-o://6da0629a64f51120a10ec58ba38aa00af272b821093ed0733990d728e7347bae" gracePeriod=30 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.025414 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="sg-core" containerID="cri-o://e7ed9a9b7deb5129aee76284c1c87b49885fd897012779cc8a4098572d8ccf83" gracePeriod=30 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.025414 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-notification-agent" 
containerID="cri-o://62023330e700a944c57ae8d989939ab313b16d0c7816f13cf9fcafa3226cb8ea" gracePeriod=30 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.029910 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.156776 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.565890 4784 generic.go:334] "Generic (PLEG): container finished" podID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerID="6da0629a64f51120a10ec58ba38aa00af272b821093ed0733990d728e7347bae" exitCode=0 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.565930 4784 generic.go:334] "Generic (PLEG): container finished" podID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerID="e7ed9a9b7deb5129aee76284c1c87b49885fd897012779cc8a4098572d8ccf83" exitCode=2 Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.565998 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerDied","Data":"6da0629a64f51120a10ec58ba38aa00af272b821093ed0733990d728e7347bae"} Jan 06 08:35:36 crc kubenswrapper[4784]: I0106 08:35:36.566069 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerDied","Data":"e7ed9a9b7deb5129aee76284c1c87b49885fd897012779cc8a4098572d8ccf83"} Jan 06 08:35:37 crc kubenswrapper[4784]: I0106 08:35:37.585273 4784 generic.go:334] "Generic (PLEG): container finished" podID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerID="62023330e700a944c57ae8d989939ab313b16d0c7816f13cf9fcafa3226cb8ea" exitCode=0 Jan 06 08:35:37 crc kubenswrapper[4784]: I0106 08:35:37.585787 4784 generic.go:334] "Generic (PLEG): container finished" podID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerID="1279849ab069a3461cdc6028c4f6c0dba4c4f0ff761e3f67305a9a23324eb11e" exitCode=0 Jan 06 08:35:37 crc kubenswrapper[4784]: I0106 08:35:37.585820 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerDied","Data":"62023330e700a944c57ae8d989939ab313b16d0c7816f13cf9fcafa3226cb8ea"} Jan 06 08:35:37 crc kubenswrapper[4784]: I0106 08:35:37.585861 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerDied","Data":"1279849ab069a3461cdc6028c4f6c0dba4c4f0ff761e3f67305a9a23324eb11e"} Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.021943 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.175947 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176036 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176090 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176237 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176267 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176312 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.176406 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47r2t\" (UniqueName: \"kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.177585 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.177789 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.184641 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts" (OuterVolumeSpecName: "scripts") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.184682 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t" (OuterVolumeSpecName: "kube-api-access-47r2t") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "kube-api-access-47r2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.214188 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.254765 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.277755 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data" (OuterVolumeSpecName: "config-data") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.278691 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") pod \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\" (UID: \"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b\") " Jan 06 08:35:38 crc kubenswrapper[4784]: W0106 08:35:38.278909 4784 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b/volumes/kubernetes.io~secret/config-data Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.278959 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data" (OuterVolumeSpecName: "config-data") pod "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" (UID: "16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279208 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279230 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279242 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279252 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47r2t\" (UniqueName: \"kubernetes.io/projected/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-kube-api-access-47r2t\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279262 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279270 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.279279 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.603139 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b","Type":"ContainerDied","Data":"cf07473f89febf2955db2be9cdf9b8091d10f25a62d991dcebf4b454e9795a49"} Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.603207 4784 scope.go:117] "RemoveContainer" containerID="6da0629a64f51120a10ec58ba38aa00af272b821093ed0733990d728e7347bae" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.603377 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.629515 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.638194 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.645740 4784 scope.go:117] "RemoveContainer" containerID="e7ed9a9b7deb5129aee76284c1c87b49885fd897012779cc8a4098572d8ccf83" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.667057 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:38 crc kubenswrapper[4784]: E0106 08:35:38.667887 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="sg-core" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.667975 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="sg-core" Jan 06 08:35:38 crc kubenswrapper[4784]: E0106 08:35:38.668080 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="proxy-httpd" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668138 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="proxy-httpd" Jan 06 08:35:38 crc kubenswrapper[4784]: E0106 08:35:38.668211 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-central-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668265 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-central-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: E0106 08:35:38.668326 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-notification-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668382 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-notification-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668620 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="proxy-httpd" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668696 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-central-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668758 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="ceilometer-notification-agent" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.668842 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" containerName="sg-core" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.671636 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.676068 4784 scope.go:117] "RemoveContainer" containerID="62023330e700a944c57ae8d989939ab313b16d0c7816f13cf9fcafa3226cb8ea" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.679764 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.683366 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.685748 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788409 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788533 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788592 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkwmh\" (UniqueName: \"kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788628 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788665 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788885 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.788971 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890644 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890718 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkwmh\" (UniqueName: \"kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890757 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890800 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890845 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890871 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.890915 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.892202 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.893215 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.898267 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.899754 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.902787 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.907072 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.907388 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkwmh\" (UniqueName: \"kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh\") pod \"ceilometer-0\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " pod="openstack/ceilometer-0" Jan 06 08:35:38 crc kubenswrapper[4784]: I0106 08:35:38.994471 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:39 crc kubenswrapper[4784]: I0106 08:35:39.248789 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:40 crc kubenswrapper[4784]: I0106 08:35:40.323854 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b" path="/var/lib/kubelet/pods/16f2b65e-5208-4cd7-a7eb-bdd52bb53e3b/volumes" Jan 06 08:35:44 crc kubenswrapper[4784]: I0106 08:35:44.253715 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:44 crc kubenswrapper[4784]: I0106 08:35:44.254617 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:35:45 crc kubenswrapper[4784]: I0106 08:35:45.466940 4784 scope.go:117] "RemoveContainer" containerID="1279849ab069a3461cdc6028c4f6c0dba4c4f0ff761e3f67305a9a23324eb11e" Jan 06 08:35:46 crc kubenswrapper[4784]: I0106 08:35:46.159742 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:46 crc kubenswrapper[4784]: W0106 08:35:46.172586 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93900793_e964_4665_8d8b_7a7dafc3652d.slice/crio-eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c WatchSource:0}: Error finding container eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c: Status 404 returned error can't find the container with id eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c Jan 06 08:35:46 crc kubenswrapper[4784]: I0106 08:35:46.737413 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d1dc9219-aca3-47c5-b8f7-37799235c2a9","Type":"ContainerStarted","Data":"b858b3f1e24015ec6ea11e61a8507c959309d301f0e42d8b7abf79701332e44f"} Jan 06 08:35:46 crc kubenswrapper[4784]: I0106 08:35:46.741255 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerStarted","Data":"eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c"} Jan 06 08:35:46 crc kubenswrapper[4784]: I0106 08:35:46.764152 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.129879803 podStartE2EDuration="15.764134451s" podCreationTimestamp="2026-01-06 08:35:31 +0000 UTC" firstStartedPulling="2026-01-06 08:35:32.912513676 +0000 UTC m=+1234.958686513" lastFinishedPulling="2026-01-06 08:35:45.546768324 +0000 UTC m=+1247.592941161" observedRunningTime="2026-01-06 08:35:46.759075434 +0000 UTC m=+1248.805248281" watchObservedRunningTime="2026-01-06 08:35:46.764134451 +0000 UTC m=+1248.810307278" Jan 06 08:35:47 crc kubenswrapper[4784]: I0106 08:35:47.753067 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerStarted","Data":"e103569a68bc3df3344544a89d4f62943f452c3f5a46a6e83ca83cd04df94db5"} Jan 06 08:35:47 crc kubenswrapper[4784]: I0106 08:35:47.753416 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerStarted","Data":"fa4f624305789bdc7c5eab95920d6cbcde6cab7a62a2b6c1f38605685bc60cd5"} Jan 06 08:35:48 crc kubenswrapper[4784]: I0106 08:35:48.764258 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerStarted","Data":"b9af3d9746a8c69e9c9fc27f1bb3bb1810ce7e665587ba466c9798a81218f2e4"} Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.517650 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-8xwm9"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.519619 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.527374 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8xwm9"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.700153 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.700224 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z25zd\" (UniqueName: \"kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.725511 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-kxmhp"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.727059 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.769967 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kxmhp"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.801889 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.802246 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z25zd\" (UniqueName: \"kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.803270 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.851862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z25zd\" (UniqueName: \"kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd\") pod \"nova-api-db-create-8xwm9\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.857602 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-n9rlv"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.870394 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.887955 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-n9rlv"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.913231 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.913362 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsmzh\" (UniqueName: \"kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.946833 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-361f-account-create-update-wzgwj"] Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.950883 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.955888 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 06 08:35:50 crc kubenswrapper[4784]: I0106 08:35:50.965623 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-361f-account-create-update-wzgwj"] Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.016274 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqvsl\" (UniqueName: \"kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.016363 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.016402 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.016452 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsmzh\" (UniqueName: \"kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.021278 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.066323 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsmzh\" (UniqueName: \"kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh\") pod \"nova-cell0-db-create-kxmhp\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.071531 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-t2sj4"] Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.073627 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.077857 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.102095 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-t2sj4"] Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.120602 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqvsl\" (UniqueName: \"kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.120682 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.120717 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.120769 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv2jd\" (UniqueName: \"kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.121738 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.140018 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqvsl\" (UniqueName: \"kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl\") pod \"nova-cell1-db-create-n9rlv\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.140488 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.219884 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.222012 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.222070 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kszvm\" (UniqueName: \"kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.222100 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.222149 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv2jd\" (UniqueName: \"kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.223071 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.246077 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-2c71-account-create-update-sgvgl"] Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.250463 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.252789 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.257908 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.264267 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv2jd\" (UniqueName: \"kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd\") pod \"nova-api-361f-account-create-update-wzgwj\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.266798 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2c71-account-create-update-sgvgl"] Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.276342 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.326875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.326965 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kszvm\" (UniqueName: \"kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.328656 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.351142 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kszvm\" (UniqueName: \"kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm\") pod \"nova-cell0-7e75-account-create-update-t2sj4\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.426495 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.429000 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5dwg\" (UniqueName: \"kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.429182 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.531859 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5dwg\" (UniqueName: \"kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.532002 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.535502 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.552726 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5dwg\" (UniqueName: \"kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg\") pod \"nova-cell1-2c71-account-create-update-sgvgl\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.578601 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.842644 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerStarted","Data":"ec7545f80161168cdb2642216f715a156c1c3b53b6c2e1f2fc5134b9972dd4a7"} Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.843133 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-central-agent" containerID="cri-o://fa4f624305789bdc7c5eab95920d6cbcde6cab7a62a2b6c1f38605685bc60cd5" gracePeriod=30 Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.843208 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.843375 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="proxy-httpd" containerID="cri-o://ec7545f80161168cdb2642216f715a156c1c3b53b6c2e1f2fc5134b9972dd4a7" gracePeriod=30 Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.843435 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="sg-core" containerID="cri-o://b9af3d9746a8c69e9c9fc27f1bb3bb1810ce7e665587ba466c9798a81218f2e4" gracePeriod=30 Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.843486 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-notification-agent" containerID="cri-o://e103569a68bc3df3344544a89d4f62943f452c3f5a46a6e83ca83cd04df94db5" gracePeriod=30 Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.892060 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=9.400974741 podStartE2EDuration="13.892035258s" podCreationTimestamp="2026-01-06 08:35:38 +0000 UTC" firstStartedPulling="2026-01-06 08:35:46.175072248 +0000 UTC m=+1248.221245085" lastFinishedPulling="2026-01-06 08:35:50.666132765 +0000 UTC m=+1252.712305602" observedRunningTime="2026-01-06 08:35:51.867224465 +0000 UTC m=+1253.913397302" watchObservedRunningTime="2026-01-06 08:35:51.892035258 +0000 UTC m=+1253.938208095" Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.924629 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8xwm9"] Jan 06 08:35:51 crc kubenswrapper[4784]: W0106 08:35:51.934155 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf57eec82_c0b1_41de_8082_a096a3e73acc.slice/crio-f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652 WatchSource:0}: Error finding container f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652: Status 404 returned error can't find the container with id f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652 Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.945843 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-361f-account-create-update-wzgwj"] Jan 06 08:35:51 crc kubenswrapper[4784]: W0106 08:35:51.977315 4784 manager.go:1169] Failed to process watch event 
Jan 06 08:35:51 crc kubenswrapper[4784]: W0106 08:35:51.977315 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod776ae843_cb1c_4edd_9cb1_e7a9513e9aa5.slice/crio-8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922 WatchSource:0}: Error finding container 8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922: Status 404 returned error can't find the container with id 8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922
Jan 06 08:35:51 crc kubenswrapper[4784]: I0106 08:35:51.983115 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-n9rlv"]
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.001045 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kxmhp"]
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.157570 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-t2sj4"]
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.235049 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2c71-account-create-update-sgvgl"]
Jan 06 08:35:52 crc kubenswrapper[4784]: W0106 08:35:52.324025 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod778d4ca4_6cef_45a6_8870_657c2c578797.slice/crio-e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb WatchSource:0}: Error finding container e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb: Status 404 returned error can't find the container with id e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.863783 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" event={"ID":"778d4ca4-6cef-45a6-8870-657c2c578797","Type":"ContainerStarted","Data":"a50750221049fbec4ad07b99f4ba3153c6a3c671546f9b7f4d3478557d012fcc"}
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.864298 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" event={"ID":"778d4ca4-6cef-45a6-8870-657c2c578797","Type":"ContainerStarted","Data":"e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb"}
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874350 4784 generic.go:334] "Generic (PLEG): container finished" podID="93900793-e964-4665-8d8b-7a7dafc3652d" containerID="ec7545f80161168cdb2642216f715a156c1c3b53b6c2e1f2fc5134b9972dd4a7" exitCode=0
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874384 4784 generic.go:334] "Generic (PLEG): container finished" podID="93900793-e964-4665-8d8b-7a7dafc3652d" containerID="b9af3d9746a8c69e9c9fc27f1bb3bb1810ce7e665587ba466c9798a81218f2e4" exitCode=2
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874394 4784 generic.go:334] "Generic (PLEG): container finished" podID="93900793-e964-4665-8d8b-7a7dafc3652d" containerID="e103569a68bc3df3344544a89d4f62943f452c3f5a46a6e83ca83cd04df94db5" exitCode=0
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874401 4784 generic.go:334] "Generic (PLEG): container finished" podID="93900793-e964-4665-8d8b-7a7dafc3652d" containerID="fa4f624305789bdc7c5eab95920d6cbcde6cab7a62a2b6c1f38605685bc60cd5" exitCode=0
Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874452 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0"
event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerDied","Data":"ec7545f80161168cdb2642216f715a156c1c3b53b6c2e1f2fc5134b9972dd4a7"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874483 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerDied","Data":"b9af3d9746a8c69e9c9fc27f1bb3bb1810ce7e665587ba466c9798a81218f2e4"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874495 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerDied","Data":"e103569a68bc3df3344544a89d4f62943f452c3f5a46a6e83ca83cd04df94db5"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874503 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerDied","Data":"fa4f624305789bdc7c5eab95920d6cbcde6cab7a62a2b6c1f38605685bc60cd5"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874512 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"93900793-e964-4665-8d8b-7a7dafc3652d","Type":"ContainerDied","Data":"eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.874525 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eba22092fd80a98774379cdb9df635dee13418b1cb2c6b34ddab7c6bf325ff6c" Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.905445 4784 generic.go:334] "Generic (PLEG): container finished" podID="3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" containerID="db525362445edf924375717f5d9fb23cedb69000cd20cfb43d1dff81eaa3c9ea" exitCode=0 Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.905568 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" event={"ID":"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c","Type":"ContainerDied","Data":"db525362445edf924375717f5d9fb23cedb69000cd20cfb43d1dff81eaa3c9ea"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.905609 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" event={"ID":"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c","Type":"ContainerStarted","Data":"04529965f7a8a469acfb96396c8629623f63540605a5b4d6d611cca683ee15d3"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.915050 4784 generic.go:334] "Generic (PLEG): container finished" podID="f57eec82-c0b1-41de-8082-a096a3e73acc" containerID="c146bbd6766980179c44bb174dda179d82aa1f0d0e7eb63a3c52a015b45e5c48" exitCode=0 Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.915150 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-361f-account-create-update-wzgwj" event={"ID":"f57eec82-c0b1-41de-8082-a096a3e73acc","Type":"ContainerDied","Data":"c146bbd6766980179c44bb174dda179d82aa1f0d0e7eb63a3c52a015b45e5c48"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.915186 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-361f-account-create-update-wzgwj" event={"ID":"f57eec82-c0b1-41de-8082-a096a3e73acc","Type":"ContainerStarted","Data":"f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.918741 4784 generic.go:334] "Generic (PLEG): container finished" podID="d5ec4933-ceca-4a4f-9206-72e01f451292" 
containerID="f8ee637f6b50a818ba4066de18a62301be2e77b51f282c93e245f39888f43173" exitCode=0 Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.918904 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8xwm9" event={"ID":"d5ec4933-ceca-4a4f-9206-72e01f451292","Type":"ContainerDied","Data":"f8ee637f6b50a818ba4066de18a62301be2e77b51f282c93e245f39888f43173"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.918997 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8xwm9" event={"ID":"d5ec4933-ceca-4a4f-9206-72e01f451292","Type":"ContainerStarted","Data":"d3de94a15127062d8019a53964b5308e42e68b7e58b43d2647693ca780deda56"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.921159 4784 generic.go:334] "Generic (PLEG): container finished" podID="60288dfb-94e1-4aef-a67c-3ad3d457d124" containerID="9f16d9ac58ab757de59707bee78657c17595d2a5b6c9e51b71adc35b285b71e5" exitCode=0 Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.921273 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kxmhp" event={"ID":"60288dfb-94e1-4aef-a67c-3ad3d457d124","Type":"ContainerDied","Data":"9f16d9ac58ab757de59707bee78657c17595d2a5b6c9e51b71adc35b285b71e5"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.921303 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kxmhp" event={"ID":"60288dfb-94e1-4aef-a67c-3ad3d457d124","Type":"ContainerStarted","Data":"fa4606b94cbbb1a6dce96cd2c7e5604f5baba08dc307661609df7c27d83394fc"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.924689 4784 generic.go:334] "Generic (PLEG): container finished" podID="776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" containerID="5eaffcc3fc842b86a3f57a8ce3fde89734b9bf87ec0b08a500a602f48a25ee64" exitCode=0 Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.924756 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n9rlv" event={"ID":"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5","Type":"ContainerDied","Data":"5eaffcc3fc842b86a3f57a8ce3fde89734b9bf87ec0b08a500a602f48a25ee64"} Jan 06 08:35:52 crc kubenswrapper[4784]: I0106 08:35:52.924788 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n9rlv" event={"ID":"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5","Type":"ContainerStarted","Data":"8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922"} Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.028303 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.069605 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.070508 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-log" containerID="cri-o://eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91" gracePeriod=30 Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.070826 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-httpd" containerID="cri-o://20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77" gracePeriod=30 Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183341 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183535 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183583 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkwmh\" (UniqueName: \"kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183604 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183667 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183727 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.183788 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml\") pod \"93900793-e964-4665-8d8b-7a7dafc3652d\" (UID: \"93900793-e964-4665-8d8b-7a7dafc3652d\") " Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.185605 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.185829 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.193100 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh" (OuterVolumeSpecName: "kube-api-access-nkwmh") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "kube-api-access-nkwmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.198497 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts" (OuterVolumeSpecName: "scripts") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.225578 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.286942 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkwmh\" (UniqueName: \"kubernetes.io/projected/93900793-e964-4665-8d8b-7a7dafc3652d-kube-api-access-nkwmh\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.286987 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.286998 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.287008 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.287019 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/93900793-e964-4665-8d8b-7a7dafc3652d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.289434 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.305560 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data" (OuterVolumeSpecName: "config-data") pod "93900793-e964-4665-8d8b-7a7dafc3652d" (UID: "93900793-e964-4665-8d8b-7a7dafc3652d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.389328 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.389371 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93900793-e964-4665-8d8b-7a7dafc3652d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.938576 4784 generic.go:334] "Generic (PLEG): container finished" podID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerID="eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91" exitCode=143 Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.938684 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerDied","Data":"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91"} Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.940527 4784 generic.go:334] "Generic (PLEG): container finished" podID="778d4ca4-6cef-45a6-8870-657c2c578797" containerID="a50750221049fbec4ad07b99f4ba3153c6a3c671546f9b7f4d3478557d012fcc" exitCode=0 Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.940669 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.940751 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" event={"ID":"778d4ca4-6cef-45a6-8870-657c2c578797","Type":"ContainerDied","Data":"a50750221049fbec4ad07b99f4ba3153c6a3c671546f9b7f4d3478557d012fcc"} Jan 06 08:35:53 crc kubenswrapper[4784]: I0106 08:35:53.991277 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:53.999910 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.021561 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:54 crc kubenswrapper[4784]: E0106 08:35:54.022981 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="sg-core" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023003 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="sg-core" Jan 06 08:35:54 crc kubenswrapper[4784]: E0106 08:35:54.023018 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-notification-agent" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023025 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-notification-agent" Jan 06 08:35:54 crc kubenswrapper[4784]: E0106 08:35:54.023068 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="proxy-httpd" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023075 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="proxy-httpd" Jan 
Jan 06 08:35:54 crc kubenswrapper[4784]: E0106 08:35:54.023098 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-central-agent"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023104 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-central-agent"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023301 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="proxy-httpd"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023316 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-notification-agent"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023326 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="sg-core"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.023336 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" containerName="ceilometer-central-agent"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.025740 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.030480 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.032972 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.060964 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.103864 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn46h\" (UniqueName: \"kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.103938 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.104001 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.104255 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0"
Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.104279 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.104298 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.104357 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.211302 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.211417 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.211681 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.211788 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn46h\" (UniqueName: \"kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.211955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.212141 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.212215 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.212392 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.214085 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.223693 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.227369 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.241222 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.241789 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.249854 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn46h\" (UniqueName: \"kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h\") pod \"ceilometer-0\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.332411 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93900793-e964-4665-8d8b-7a7dafc3652d" path="/var/lib/kubelet/pods/93900793-e964-4665-8d8b-7a7dafc3652d/volumes" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.404882 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.517669 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.646743 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kszvm\" (UniqueName: \"kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm\") pod \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.646932 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts\") pod \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\" (UID: \"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c\") " Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.651315 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" (UID: "3da71d4b-c3fe-4b9e-b6a7-63ababb2632c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.661702 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm" (OuterVolumeSpecName: "kube-api-access-kszvm") pod "3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" (UID: "3da71d4b-c3fe-4b9e-b6a7-63ababb2632c"). InnerVolumeSpecName "kube-api-access-kszvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.751075 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kszvm\" (UniqueName: \"kubernetes.io/projected/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-kube-api-access-kszvm\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.751620 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.914708 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.926818 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.943636 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.962103 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.972194 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-n9rlv" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.972420 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n9rlv" event={"ID":"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5","Type":"ContainerDied","Data":"8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922"} Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.972468 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cd0f5f5068cbad0419b9ac8d6dd2015259ff3ef76cb77f9536a9f68280c9922" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.974077 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" event={"ID":"778d4ca4-6cef-45a6-8870-657c2c578797","Type":"ContainerDied","Data":"e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb"} Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.974096 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e29641c5f8cbc0201d2a34c834ba7cc8413242ebd08a8642b1d2d130b13362eb" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.974155 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2c71-account-create-update-sgvgl" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.976382 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.976721 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" event={"ID":"3da71d4b-c3fe-4b9e-b6a7-63ababb2632c","Type":"ContainerDied","Data":"04529965f7a8a469acfb96396c8629623f63540605a5b4d6d611cca683ee15d3"} Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.976745 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04529965f7a8a469acfb96396c8629623f63540605a5b4d6d611cca683ee15d3" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.976809 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-t2sj4" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.983951 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.984311 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-log" containerID="cri-o://0a80bcc5fb85ee08a7f09e9a98db4434401965e050e8d816afb93ea83870ade1" gracePeriod=30 Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.984453 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-httpd" containerID="cri-o://fd83bbd26fefdde8520038bbfa17bc570843c33022e2fb92a18c631bd3f1d88c" gracePeriod=30 Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.995533 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-361f-account-create-update-wzgwj" event={"ID":"f57eec82-c0b1-41de-8082-a096a3e73acc","Type":"ContainerDied","Data":"f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652"} Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.995578 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f38c263e8b2233bf07de87a339e0ef40af85bbdcd87ce0097b053740f5235652" Jan 06 08:35:54 crc kubenswrapper[4784]: I0106 08:35:54.995641 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-wzgwj" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.001851 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8xwm9" event={"ID":"d5ec4933-ceca-4a4f-9206-72e01f451292","Type":"ContainerDied","Data":"d3de94a15127062d8019a53964b5308e42e68b7e58b43d2647693ca780deda56"} Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.001906 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3de94a15127062d8019a53964b5308e42e68b7e58b43d2647693ca780deda56" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.001999 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8xwm9" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.013072 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kxmhp" event={"ID":"60288dfb-94e1-4aef-a67c-3ad3d457d124","Type":"ContainerDied","Data":"fa4606b94cbbb1a6dce96cd2c7e5604f5baba08dc307661609df7c27d83394fc"} Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.013117 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa4606b94cbbb1a6dce96cd2c7e5604f5baba08dc307661609df7c27d83394fc" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.013154 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-kxmhp" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.067822 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xv2jd\" (UniqueName: \"kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd\") pod \"f57eec82-c0b1-41de-8082-a096a3e73acc\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.067883 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts\") pod \"778d4ca4-6cef-45a6-8870-657c2c578797\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068012 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts\") pod \"60288dfb-94e1-4aef-a67c-3ad3d457d124\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068106 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts\") pod \"f57eec82-c0b1-41de-8082-a096a3e73acc\" (UID: \"f57eec82-c0b1-41de-8082-a096a3e73acc\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068285 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqvsl\" (UniqueName: \"kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl\") pod \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts\") pod \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\" (UID: \"776ae843-cb1c-4edd-9cb1-e7a9513e9aa5\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068348 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts\") pod \"d5ec4933-ceca-4a4f-9206-72e01f451292\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068377 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsmzh\" (UniqueName: \"kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh\") pod \"60288dfb-94e1-4aef-a67c-3ad3d457d124\" (UID: \"60288dfb-94e1-4aef-a67c-3ad3d457d124\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.068456 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5dwg\" (UniqueName: \"kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg\") pod \"778d4ca4-6cef-45a6-8870-657c2c578797\" (UID: \"778d4ca4-6cef-45a6-8870-657c2c578797\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.069046 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts" 
(OuterVolumeSpecName: "operator-scripts") pod "776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" (UID: "776ae843-cb1c-4edd-9cb1-e7a9513e9aa5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.070170 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d5ec4933-ceca-4a4f-9206-72e01f451292" (UID: "d5ec4933-ceca-4a4f-9206-72e01f451292"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.070610 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "778d4ca4-6cef-45a6-8870-657c2c578797" (UID: "778d4ca4-6cef-45a6-8870-657c2c578797"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.071060 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "60288dfb-94e1-4aef-a67c-3ad3d457d124" (UID: "60288dfb-94e1-4aef-a67c-3ad3d457d124"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.071265 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f57eec82-c0b1-41de-8082-a096a3e73acc" (UID: "f57eec82-c0b1-41de-8082-a096a3e73acc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.086080 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh" (OuterVolumeSpecName: "kube-api-access-dsmzh") pod "60288dfb-94e1-4aef-a67c-3ad3d457d124" (UID: "60288dfb-94e1-4aef-a67c-3ad3d457d124"). InnerVolumeSpecName "kube-api-access-dsmzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.099085 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl" (OuterVolumeSpecName: "kube-api-access-cqvsl") pod "776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" (UID: "776ae843-cb1c-4edd-9cb1-e7a9513e9aa5"). InnerVolumeSpecName "kube-api-access-cqvsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.099159 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg" (OuterVolumeSpecName: "kube-api-access-t5dwg") pod "778d4ca4-6cef-45a6-8870-657c2c578797" (UID: "778d4ca4-6cef-45a6-8870-657c2c578797"). InnerVolumeSpecName "kube-api-access-t5dwg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.100938 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd" (OuterVolumeSpecName: "kube-api-access-xv2jd") pod "f57eec82-c0b1-41de-8082-a096a3e73acc" (UID: "f57eec82-c0b1-41de-8082-a096a3e73acc"). InnerVolumeSpecName "kube-api-access-xv2jd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.118617 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.170415 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z25zd\" (UniqueName: \"kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd\") pod \"d5ec4933-ceca-4a4f-9206-72e01f451292\" (UID: \"d5ec4933-ceca-4a4f-9206-72e01f451292\") " Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171331 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/60288dfb-94e1-4aef-a67c-3ad3d457d124-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171349 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57eec82-c0b1-41de-8082-a096a3e73acc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171360 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqvsl\" (UniqueName: \"kubernetes.io/projected/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-kube-api-access-cqvsl\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171373 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171383 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec4933-ceca-4a4f-9206-72e01f451292-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171392 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsmzh\" (UniqueName: \"kubernetes.io/projected/60288dfb-94e1-4aef-a67c-3ad3d457d124-kube-api-access-dsmzh\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171403 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5dwg\" (UniqueName: \"kubernetes.io/projected/778d4ca4-6cef-45a6-8870-657c2c578797-kube-api-access-t5dwg\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171415 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xv2jd\" (UniqueName: \"kubernetes.io/projected/f57eec82-c0b1-41de-8082-a096a3e73acc-kube-api-access-xv2jd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.171424 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/778d4ca4-6cef-45a6-8870-657c2c578797-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:55 crc 
kubenswrapper[4784]: I0106 08:35:55.176363 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd" (OuterVolumeSpecName: "kube-api-access-z25zd") pod "d5ec4933-ceca-4a4f-9206-72e01f451292" (UID: "d5ec4933-ceca-4a4f-9206-72e01f451292"). InnerVolumeSpecName "kube-api-access-z25zd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:55 crc kubenswrapper[4784]: I0106 08:35:55.274328 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z25zd\" (UniqueName: \"kubernetes.io/projected/d5ec4933-ceca-4a4f-9206-72e01f451292-kube-api-access-z25zd\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.034453 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerStarted","Data":"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692"} Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.034946 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerStarted","Data":"a816908a86fdb3eb8cad08de3c3a5339716c0ec63c72c95647507a993dc07bb4"} Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.039649 4784 generic.go:334] "Generic (PLEG): container finished" podID="1424869a-8bd7-4f1c-9f98-17a826550168" containerID="0a80bcc5fb85ee08a7f09e9a98db4434401965e050e8d816afb93ea83870ade1" exitCode=143 Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.039721 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerDied","Data":"0a80bcc5fb85ee08a7f09e9a98db4434401965e050e8d816afb93ea83870ade1"} Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.209429 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.791979 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807055 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807221 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807267 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807382 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807437 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807478 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807563 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807599 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85nbb\" (UniqueName: \"kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb\") pod \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\" (UID: \"c1b66b45-3ef2-49d9-aa09-490c73fa86aa\") " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.807890 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs" (OuterVolumeSpecName: "logs") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.808397 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.808611 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.814724 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.814758 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb" (OuterVolumeSpecName: "kube-api-access-85nbb") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "kube-api-access-85nbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.822360 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts" (OuterVolumeSpecName: "scripts") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.856911 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.887325 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.902742 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data" (OuterVolumeSpecName: "config-data") pod "c1b66b45-3ef2-49d9-aa09-490c73fa86aa" (UID: "c1b66b45-3ef2-49d9-aa09-490c73fa86aa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912380 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912439 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912457 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912475 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912489 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912505 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85nbb\" (UniqueName: \"kubernetes.io/projected/c1b66b45-3ef2-49d9-aa09-490c73fa86aa-kube-api-access-85nbb\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.912568 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 06 08:35:56 crc kubenswrapper[4784]: I0106 08:35:56.955026 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.014558 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.056961 4784 generic.go:334] "Generic (PLEG): container finished" podID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerID="20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77" exitCode=0 Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.057052 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerDied","Data":"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77"} Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.057109 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c1b66b45-3ef2-49d9-aa09-490c73fa86aa","Type":"ContainerDied","Data":"a769b546a27590185a95af6106dbdcbf851665b3e6e8e2f3897605fcb60e0a61"} Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.057136 4784 scope.go:117] "RemoveContainer" containerID="20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.057310 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.066207 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerStarted","Data":"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46"} Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.117006 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.121052 4784 scope.go:117] "RemoveContainer" containerID="eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.137174 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.164512 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165284 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-log" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.165387 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-log" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165453 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f57eec82-c0b1-41de-8082-a096a3e73acc" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.165512 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f57eec82-c0b1-41de-8082-a096a3e73acc" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165603 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ec4933-ceca-4a4f-9206-72e01f451292" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.165661 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5ec4933-ceca-4a4f-9206-72e01f451292" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165723 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="778d4ca4-6cef-45a6-8870-657c2c578797" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.165774 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="778d4ca4-6cef-45a6-8870-657c2c578797" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165853 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60288dfb-94e1-4aef-a67c-3ad3d457d124" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.165913 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="60288dfb-94e1-4aef-a67c-3ad3d457d124" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.165974 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166025 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc 
kubenswrapper[4784]: E0106 08:35:57.166086 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166136 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.166194 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-httpd" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166251 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-httpd" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166493 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-httpd" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166584 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166724 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166788 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" containerName="glance-log" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166860 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f57eec82-c0b1-41de-8082-a096a3e73acc" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166918 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="778d4ca4-6cef-45a6-8870-657c2c578797" containerName="mariadb-account-create-update" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.166989 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="60288dfb-94e1-4aef-a67c-3ad3d457d124" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.167044 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5ec4933-ceca-4a4f-9206-72e01f451292" containerName="mariadb-database-create" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.168727 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.172194 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.172893 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.175686 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.218696 4784 scope.go:117] "RemoveContainer" containerID="20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.221609 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77\": container with ID starting with 20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77 not found: ID does not exist" containerID="20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.221649 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77"} err="failed to get container status \"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77\": rpc error: code = NotFound desc = could not find container \"20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77\": container with ID starting with 20a195dc2ce1cb800121a2c6468d6eb5fcf3e4a1eb9f35f70c2f72e9d6f73e77 not found: ID does not exist" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.221677 4784 scope.go:117] "RemoveContainer" containerID="eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91" Jan 06 08:35:57 crc kubenswrapper[4784]: E0106 08:35:57.221914 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91\": container with ID starting with eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91 not found: ID does not exist" containerID="eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.222010 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91"} err="failed to get container status \"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91\": rpc error: code = NotFound desc = could not find container \"eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91\": container with ID starting with eb1555f1270b2e14f10017339475aaf7786ca925cd7726a1904ffbf885f7ab91 not found: ID does not exist" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.321815 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmlxj\" (UniqueName: \"kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.321892 
4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.321992 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.322120 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.322192 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.322245 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.322297 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.322339 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.423892 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424240 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424468 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424580 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424670 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424787 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmlxj\" (UniqueName: \"kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.424880 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.425035 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.425769 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.425948 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.426416 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.434759 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.434884 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.435182 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.436155 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.444464 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmlxj\" (UniqueName: \"kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.495254 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") " pod="openstack/glance-default-external-api-0" Jan 06 08:35:57 crc kubenswrapper[4784]: I0106 08:35:57.794200 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:35:58 crc kubenswrapper[4784]: I0106 08:35:58.080584 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerStarted","Data":"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b"} Jan 06 08:35:58 crc kubenswrapper[4784]: I0106 08:35:58.155497 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.149:9292/healthcheck\": read tcp 10.217.0.2:52834->10.217.0.149:9292: read: connection reset by peer" Jan 06 08:35:58 crc kubenswrapper[4784]: I0106 08:35:58.155583 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.149:9292/healthcheck\": read tcp 10.217.0.2:52820->10.217.0.149:9292: read: connection reset by peer" Jan 06 08:35:58 crc kubenswrapper[4784]: I0106 08:35:58.341469 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1b66b45-3ef2-49d9-aa09-490c73fa86aa" path="/var/lib/kubelet/pods/c1b66b45-3ef2-49d9-aa09-490c73fa86aa/volumes" Jan 06 08:35:58 crc kubenswrapper[4784]: I0106 08:35:58.434044 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.095814 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerStarted","Data":"35363fe16c783651f53f84efaf787460f1de140c24571253d432c8982d3aa3c6"} Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.108316 4784 generic.go:334] "Generic (PLEG): container finished" podID="1424869a-8bd7-4f1c-9f98-17a826550168" containerID="fd83bbd26fefdde8520038bbfa17bc570843c33022e2fb92a18c631bd3f1d88c" exitCode=0 Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.108363 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerDied","Data":"fd83bbd26fefdde8520038bbfa17bc570843c33022e2fb92a18c631bd3f1d88c"} Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.390019 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.572658 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.572734 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.572838 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.572938 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.574241 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.574313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.574391 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvq6r\" (UniqueName: \"kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.574438 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts\") pod \"1424869a-8bd7-4f1c-9f98-17a826550168\" (UID: \"1424869a-8bd7-4f1c-9f98-17a826550168\") " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.575527 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.576009 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs" (OuterVolumeSpecName: "logs") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.577465 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.577485 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1424869a-8bd7-4f1c-9f98-17a826550168-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.581807 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.583052 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r" (OuterVolumeSpecName: "kube-api-access-pvq6r") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "kube-api-access-pvq6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.587849 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts" (OuterVolumeSpecName: "scripts") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.622564 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.626355 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data" (OuterVolumeSpecName: "config-data") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.649856 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1424869a-8bd7-4f1c-9f98-17a826550168" (UID: "1424869a-8bd7-4f1c-9f98-17a826550168"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680260 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680307 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvq6r\" (UniqueName: \"kubernetes.io/projected/1424869a-8bd7-4f1c-9f98-17a826550168-kube-api-access-pvq6r\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680323 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680333 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680345 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.680357 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1424869a-8bd7-4f1c-9f98-17a826550168-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.750850 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 06 08:35:59 crc kubenswrapper[4784]: I0106 08:35:59.783217 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.122167 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerStarted","Data":"daa8748da271a4d548f8c192fbf0ea343adcd94a49510154aa63c807c38815e3"} Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.122244 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerStarted","Data":"c7a53b94cb251c8ef3e62bbecc07389e6162337f8fbd7425b6a2aa4930128cb4"} Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.128877 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerStarted","Data":"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432"} Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.128976 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="proxy-httpd" containerID="cri-o://b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432" gracePeriod=30 Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.128978 4784 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-central-agent" containerID="cri-o://6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692" gracePeriod=30 Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.128983 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="sg-core" containerID="cri-o://b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b" gracePeriod=30 Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.129000 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.129057 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-notification-agent" containerID="cri-o://c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46" gracePeriod=30 Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.133261 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1424869a-8bd7-4f1c-9f98-17a826550168","Type":"ContainerDied","Data":"5da4fb387f7cf2f4d53ccb440e69652edc954f6d4926f8e97aada01737359b9e"} Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.133314 4784 scope.go:117] "RemoveContainer" containerID="fd83bbd26fefdde8520038bbfa17bc570843c33022e2fb92a18c631bd3f1d88c" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.133521 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.161872 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.16184161 podStartE2EDuration="3.16184161s" podCreationTimestamp="2026-01-06 08:35:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:00.152334264 +0000 UTC m=+1262.198507101" watchObservedRunningTime="2026-01-06 08:36:00.16184161 +0000 UTC m=+1262.208014447" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.163882 4784 scope.go:117] "RemoveContainer" containerID="0a80bcc5fb85ee08a7f09e9a98db4434401965e050e8d816afb93ea83870ade1" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.186164 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.201320 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.214646 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:36:00 crc kubenswrapper[4784]: E0106 08:36:00.215179 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-log" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.215199 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-log" Jan 06 08:36:00 crc kubenswrapper[4784]: E0106 08:36:00.215231 4784 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-httpd" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.215242 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-httpd" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.215401 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-log" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.215426 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" containerName="glance-httpd" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.216478 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.217449 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.418611981 podStartE2EDuration="7.217419301s" podCreationTimestamp="2026-01-06 08:35:53 +0000 UTC" firstStartedPulling="2026-01-06 08:35:55.136969221 +0000 UTC m=+1257.183142058" lastFinishedPulling="2026-01-06 08:35:58.935776541 +0000 UTC m=+1260.981949378" observedRunningTime="2026-01-06 08:36:00.212622111 +0000 UTC m=+1262.258794948" watchObservedRunningTime="2026-01-06 08:36:00.217419301 +0000 UTC m=+1262.263592138" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.220471 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.220632 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.241443 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.325403 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1424869a-8bd7-4f1c-9f98-17a826550168" path="/var/lib/kubelet/pods/1424869a-8bd7-4f1c-9f98-17a826550168/volumes" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.398612 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.398943 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399085 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vrbv\" (UniqueName: \"kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399207 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399342 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399479 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399602 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.399746 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501238 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501325 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501356 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501377 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vrbv\" (UniqueName: \"kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501406 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501452 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501518 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.501558 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.502904 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.502969 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.503044 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.510236 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.511163 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.512186 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.514034 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.524731 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vrbv\" (UniqueName: \"kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.545421 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " pod="openstack/glance-default-internal-api-0" Jan 06 08:36:00 crc kubenswrapper[4784]: I0106 08:36:00.835385 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.203094 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p6pg6"] Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.204578 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.207361 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vqhzt" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.207735 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.207860 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.246495 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p6pg6"] Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.266777 4784 generic.go:334] "Generic (PLEG): container finished" podID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerID="b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432" exitCode=0 Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.266815 4784 generic.go:334] "Generic (PLEG): container finished" podID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerID="b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b" exitCode=2 Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.266823 4784 generic.go:334] "Generic (PLEG): container finished" podID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerID="c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46" exitCode=0 Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.266884 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerDied","Data":"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432"} Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.266941 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerDied","Data":"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b"} Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.267033 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerDied","Data":"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46"} Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.324437 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.324537 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.324820 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqgd7\" (UniqueName: \"kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7\") pod 
\"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.325303 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.427041 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.427192 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.427223 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.427251 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqgd7\" (UniqueName: \"kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.433920 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.436279 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.440489 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.462067 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqgd7\" (UniqueName: 
\"kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7\") pod \"nova-cell0-conductor-db-sync-p6pg6\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") " pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.567434 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" Jan 06 08:36:01 crc kubenswrapper[4784]: I0106 08:36:01.665466 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:36:01 crc kubenswrapper[4784]: W0106 08:36:01.674311 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode94ed326_8f56_4933_8616_5814505b58f5.slice/crio-82ac278234fa68eef222653ae3d0d395045b228175b6773e69a06dcbb04b7af7 WatchSource:0}: Error finding container 82ac278234fa68eef222653ae3d0d395045b228175b6773e69a06dcbb04b7af7: Status 404 returned error can't find the container with id 82ac278234fa68eef222653ae3d0d395045b228175b6773e69a06dcbb04b7af7 Jan 06 08:36:02 crc kubenswrapper[4784]: I0106 08:36:02.108489 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p6pg6"] Jan 06 08:36:02 crc kubenswrapper[4784]: W0106 08:36:02.148357 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dce1cc6_db1c_4b61_adfe_a20a3751aad3.slice/crio-beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218 WatchSource:0}: Error finding container beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218: Status 404 returned error can't find the container with id beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218 Jan 06 08:36:02 crc kubenswrapper[4784]: I0106 08:36:02.287250 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerStarted","Data":"82ac278234fa68eef222653ae3d0d395045b228175b6773e69a06dcbb04b7af7"} Jan 06 08:36:02 crc kubenswrapper[4784]: I0106 08:36:02.304965 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" event={"ID":"5dce1cc6-db1c-4b61-adfe-a20a3751aad3","Type":"ContainerStarted","Data":"beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218"} Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.319728 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerStarted","Data":"54f4dd31065d6db90e2a8b67fbc54756e16347bc95f37040733d2d6e10eb17a1"} Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.320778 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerStarted","Data":"538fb0d95d0196cf8efa1743e88a29f2ea31f008245abb93870ac9e8829ca9a8"} Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.359048 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.359022376 podStartE2EDuration="3.359022376s" podCreationTimestamp="2026-01-06 08:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 
08:36:03.346175356 +0000 UTC m=+1265.392348203" watchObservedRunningTime="2026-01-06 08:36:03.359022376 +0000 UTC m=+1265.405195213" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.842403 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.888732 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.888865 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.888898 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.888955 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.889124 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.889229 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn46h\" (UniqueName: \"kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.889265 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd\") pod \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\" (UID: \"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39\") " Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.889820 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.891194 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.891256 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.898296 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts" (OuterVolumeSpecName: "scripts") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.910054 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h" (OuterVolumeSpecName: "kube-api-access-fn46h") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "kube-api-access-fn46h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.937659 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.993885 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.993939 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.993953 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn46h\" (UniqueName: \"kubernetes.io/projected/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-kube-api-access-fn46h\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.993968 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:03 crc kubenswrapper[4784]: I0106 08:36:03.999597 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.012111 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data" (OuterVolumeSpecName: "config-data") pod "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" (UID: "e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.096181 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.096227 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.340409 4784 generic.go:334] "Generic (PLEG): container finished" podID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerID="6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692" exitCode=0 Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.340507 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerDied","Data":"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692"} Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.340519 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.340591 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39","Type":"ContainerDied","Data":"a816908a86fdb3eb8cad08de3c3a5339716c0ec63c72c95647507a993dc07bb4"} Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.340630 4784 scope.go:117] "RemoveContainer" containerID="b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.373100 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.380196 4784 scope.go:117] "RemoveContainer" containerID="b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.381881 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.419437 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.420263 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-central-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.420344 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-central-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.420433 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="sg-core" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.420483 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="sg-core" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.420559 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-notification-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.420625 4784 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-notification-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.420697 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="proxy-httpd" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.420747 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="proxy-httpd" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.420975 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="sg-core" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.421064 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-central-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.421127 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="proxy-httpd" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.421190 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" containerName="ceilometer-notification-agent" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.438707 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.439038 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.441398 4784 scope.go:117] "RemoveContainer" containerID="c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.442111 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.444216 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.473502 4784 scope.go:117] "RemoveContainer" containerID="6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.506456 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.506752 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.507078 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jkq8\" (UniqueName: \"kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.507381 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.507555 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.507615 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.507637 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.514381 4784 scope.go:117] "RemoveContainer" containerID="b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.515114 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432\": container with ID starting with b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432 not found: ID does not exist" containerID="b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.515158 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432"} err="failed to get container status \"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432\": rpc error: code = NotFound desc = could not find container \"b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432\": container with ID starting with b47f2336ef8da7f2f8e1e64def1247955df31b83fd98dd43e5ed16a674198432 not found: ID does not exist" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.515186 4784 scope.go:117] "RemoveContainer" containerID="b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.515604 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b\": container with ID starting with b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b not found: ID does not exist" containerID="b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.515710 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b"} err="failed to get container status 
\"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b\": rpc error: code = NotFound desc = could not find container \"b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b\": container with ID starting with b4c8ef0ae20892b25cfc81d4c896518652ab0b27ef421e3d8a420dafd16a197b not found: ID does not exist" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.515823 4784 scope.go:117] "RemoveContainer" containerID="c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.516692 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46\": container with ID starting with c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46 not found: ID does not exist" containerID="c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.516725 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46"} err="failed to get container status \"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46\": rpc error: code = NotFound desc = could not find container \"c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46\": container with ID starting with c4bc7ad63841c61a6608410c245bb3afbfc9514e961be0055d312007c701bf46 not found: ID does not exist" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.516748 4784 scope.go:117] "RemoveContainer" containerID="6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692" Jan 06 08:36:04 crc kubenswrapper[4784]: E0106 08:36:04.517042 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692\": container with ID starting with 6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692 not found: ID does not exist" containerID="6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.517093 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692"} err="failed to get container status \"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692\": rpc error: code = NotFound desc = could not find container \"6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692\": container with ID starting with 6473bdada60e3f882229efb50ebd863a01b3d120f7da224d85376ca57a72d692 not found: ID does not exist" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.609155 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.609224 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 
08:36:04.609274 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.609858 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.609912 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.609996 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jkq8\" (UniqueName: \"kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.610048 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.610337 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.610729 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.615163 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.616843 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.624322 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.629593 4784 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.630239 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jkq8\" (UniqueName: \"kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8\") pod \"ceilometer-0\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " pod="openstack/ceilometer-0" Jan 06 08:36:04 crc kubenswrapper[4784]: I0106 08:36:04.756951 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:05 crc kubenswrapper[4784]: I0106 08:36:05.340145 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:06 crc kubenswrapper[4784]: I0106 08:36:06.334159 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39" path="/var/lib/kubelet/pods/e68d29b4-9b6e-4bff-a3ac-30cc47cb7e39/volumes" Jan 06 08:36:06 crc kubenswrapper[4784]: I0106 08:36:06.381317 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerStarted","Data":"2a6495b69622fa93cf5b2da2c4a43176c6e505ab086bc090bb90d4e23928c1b7"} Jan 06 08:36:07 crc kubenswrapper[4784]: I0106 08:36:07.794686 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 06 08:36:07 crc kubenswrapper[4784]: I0106 08:36:07.794754 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 06 08:36:07 crc kubenswrapper[4784]: I0106 08:36:07.830031 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 06 08:36:07 crc kubenswrapper[4784]: I0106 08:36:07.844152 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 06 08:36:08 crc kubenswrapper[4784]: I0106 08:36:08.404703 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 06 08:36:08 crc kubenswrapper[4784]: I0106 08:36:08.404757 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.433222 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.466294 4784 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.471450 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.836335 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.836850 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.872663 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:10 crc kubenswrapper[4784]: I0106 08:36:10.892624 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:11 crc kubenswrapper[4784]: I0106 08:36:11.487649 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerStarted","Data":"35dd0f46d6e5474f6abaa8d280c63667bdb43f425793cbafe842053332f49e3e"} Jan 06 08:36:11 crc kubenswrapper[4784]: I0106 08:36:11.507516 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" event={"ID":"5dce1cc6-db1c-4b61-adfe-a20a3751aad3","Type":"ContainerStarted","Data":"00bd19712a8124651f5eae66da5fb56d3422ce328886923af0f40f521d7f84fd"} Jan 06 08:36:11 crc kubenswrapper[4784]: I0106 08:36:11.507628 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:11 crc kubenswrapper[4784]: I0106 08:36:11.507649 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:11 crc kubenswrapper[4784]: I0106 08:36:11.531925 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" podStartSLOduration=1.565237738 podStartE2EDuration="10.5318934s" podCreationTimestamp="2026-01-06 08:36:01 +0000 UTC" firstStartedPulling="2026-01-06 08:36:02.166452501 +0000 UTC m=+1264.212625338" lastFinishedPulling="2026-01-06 08:36:11.133108163 +0000 UTC m=+1273.179281000" observedRunningTime="2026-01-06 08:36:11.52771273 +0000 UTC m=+1273.573885567" watchObservedRunningTime="2026-01-06 08:36:11.5318934 +0000 UTC m=+1273.578066247" Jan 06 08:36:12 crc kubenswrapper[4784]: I0106 08:36:12.519429 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerStarted","Data":"290d01665ba80eec4d252487e3be4e06f7105f19aaec277d126da7d8f458b219"} Jan 06 08:36:13 crc kubenswrapper[4784]: I0106 08:36:13.532386 4784 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 06 08:36:13 crc kubenswrapper[4784]: I0106 08:36:13.532914 4784 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 06 08:36:13 crc kubenswrapper[4784]: I0106 08:36:13.957903 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:13 crc kubenswrapper[4784]: I0106 08:36:13.965134 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 06 08:36:14 crc kubenswrapper[4784]: I0106 08:36:14.544525 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerStarted","Data":"9db8fbcec626656592924a06b0f3af9d0a8e07f257adf5223b12f6ac5c873b4f"} Jan 06 08:36:15 crc kubenswrapper[4784]: I0106 08:36:15.588242 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerStarted","Data":"717f5ac3058902d9ecd453090dafa7bf034dbcf5d2373c74cb87d5e8ed0e9fa6"} Jan 06 08:36:15 crc kubenswrapper[4784]: I0106 08:36:15.588353 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/ceilometer-0" Jan 06 08:36:15 crc kubenswrapper[4784]: I0106 08:36:15.620479 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.935121873 podStartE2EDuration="11.620458594s" podCreationTimestamp="2026-01-06 08:36:04 +0000 UTC" firstStartedPulling="2026-01-06 08:36:05.366634601 +0000 UTC m=+1267.412807438" lastFinishedPulling="2026-01-06 08:36:15.051971312 +0000 UTC m=+1277.098144159" observedRunningTime="2026-01-06 08:36:15.610799763 +0000 UTC m=+1277.656972610" watchObservedRunningTime="2026-01-06 08:36:15.620458594 +0000 UTC m=+1277.666631431" Jan 06 08:36:16 crc kubenswrapper[4784]: I0106 08:36:16.361318 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:17 crc kubenswrapper[4784]: I0106 08:36:17.605734 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-central-agent" containerID="cri-o://35dd0f46d6e5474f6abaa8d280c63667bdb43f425793cbafe842053332f49e3e" gracePeriod=30 Jan 06 08:36:17 crc kubenswrapper[4784]: I0106 08:36:17.605865 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="sg-core" containerID="cri-o://9db8fbcec626656592924a06b0f3af9d0a8e07f257adf5223b12f6ac5c873b4f" gracePeriod=30 Jan 06 08:36:17 crc kubenswrapper[4784]: I0106 08:36:17.605904 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-notification-agent" containerID="cri-o://290d01665ba80eec4d252487e3be4e06f7105f19aaec277d126da7d8f458b219" gracePeriod=30 Jan 06 08:36:17 crc kubenswrapper[4784]: I0106 08:36:17.605771 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="proxy-httpd" containerID="cri-o://717f5ac3058902d9ecd453090dafa7bf034dbcf5d2373c74cb87d5e8ed0e9fa6" gracePeriod=30 Jan 06 08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623201 4784 generic.go:334] "Generic (PLEG): container finished" podID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerID="717f5ac3058902d9ecd453090dafa7bf034dbcf5d2373c74cb87d5e8ed0e9fa6" exitCode=0 Jan 06 08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623586 4784 generic.go:334] "Generic (PLEG): container finished" podID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerID="9db8fbcec626656592924a06b0f3af9d0a8e07f257adf5223b12f6ac5c873b4f" exitCode=2 Jan 06 08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623603 4784 generic.go:334] "Generic (PLEG): container finished" podID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerID="290d01665ba80eec4d252487e3be4e06f7105f19aaec277d126da7d8f458b219" exitCode=0 Jan 06 08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623246 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerDied","Data":"717f5ac3058902d9ecd453090dafa7bf034dbcf5d2373c74cb87d5e8ed0e9fa6"} Jan 06 08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623664 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerDied","Data":"9db8fbcec626656592924a06b0f3af9d0a8e07f257adf5223b12f6ac5c873b4f"} Jan 06 
08:36:18 crc kubenswrapper[4784]: I0106 08:36:18.623692 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerDied","Data":"290d01665ba80eec4d252487e3be4e06f7105f19aaec277d126da7d8f458b219"} Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.678931 4784 generic.go:334] "Generic (PLEG): container finished" podID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerID="35dd0f46d6e5474f6abaa8d280c63667bdb43f425793cbafe842053332f49e3e" exitCode=0 Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.679109 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerDied","Data":"35dd0f46d6e5474f6abaa8d280c63667bdb43f425793cbafe842053332f49e3e"} Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.679266 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb2ffcab-ac3a-45e3-8acf-a4604be90444","Type":"ContainerDied","Data":"2a6495b69622fa93cf5b2da2c4a43176c6e505ab086bc090bb90d4e23928c1b7"} Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.679285 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a6495b69622fa93cf5b2da2c4a43176c6e505ab086bc090bb90d4e23928c1b7" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.693454 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870586 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870665 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870743 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870812 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870831 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.870967 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jkq8\" (UniqueName: \"kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8\") pod 
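The four "Killing container with a grace period" entries above (gracePeriod=30) follow the standard Kubernetes termination sequence: the runtime delivers SIGTERM, waits up to the grace period, and only then force-kills; the exitCode=0 / exitCode=2 lines that follow show which containers exited cleanly within the window. A minimal Go sketch of that semantics only (not kubelet's actual code; the sleep process and the 2-second grace period are stand-ins):

// graceful_kill.go: illustrative kill-with-grace-period pattern.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGracePeriod sends SIGTERM, waits up to grace, then force-kills.
func killWithGracePeriod(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM) // the polite request behind gracePeriod=30
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		cmd.Process.Kill() // grace period expired: SIGKILL
		<-done
		fmt.Println("force-killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "300") // stand-in for a container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGracePeriod(cmd, 2*time.Second)
}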
\"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.871040 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.872807 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.875068 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.896417 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8" (OuterVolumeSpecName: "kube-api-access-6jkq8") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "kube-api-access-6jkq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.898046 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts" (OuterVolumeSpecName: "scripts") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.924649 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.972950 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973103 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") pod \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\" (UID: \"eb2ffcab-ac3a-45e3-8acf-a4604be90444\") " Jan 06 08:36:22 crc kubenswrapper[4784]: W0106 08:36:22.973292 4784 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/eb2ffcab-ac3a-45e3-8acf-a4604be90444/volumes/kubernetes.io~secret/combined-ca-bundle Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973312 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973733 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jkq8\" (UniqueName: \"kubernetes.io/projected/eb2ffcab-ac3a-45e3-8acf-a4604be90444-kube-api-access-6jkq8\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973765 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973776 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973791 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973803 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:22 crc kubenswrapper[4784]: I0106 08:36:22.973819 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb2ffcab-ac3a-45e3-8acf-a4604be90444-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.000448 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data" (OuterVolumeSpecName: "config-data") pod "eb2ffcab-ac3a-45e3-8acf-a4604be90444" (UID: "eb2ffcab-ac3a-45e3-8acf-a4604be90444"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.079644 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb2ffcab-ac3a-45e3-8acf-a4604be90444-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.689338 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.731057 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.748438 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.779380 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:36:23 crc kubenswrapper[4784]: E0106 08:36:23.779840 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-notification-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.779861 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-notification-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: E0106 08:36:23.779885 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-central-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.779893 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-central-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: E0106 08:36:23.779912 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="sg-core" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.779943 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="sg-core" Jan 06 08:36:23 crc kubenswrapper[4784]: E0106 08:36:23.779960 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="proxy-httpd" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.779966 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="proxy-httpd" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.780140 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="sg-core" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.780157 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="proxy-httpd" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.780174 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-notification-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.780184 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" containerName="ceilometer-central-agent" Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.781891 4784 util.go:30] "No sandbox for pod can be found. 
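The UnmountVolume / "Volume detached" burst above is the kubelet volume manager reconciling its actual state against the desired state after the pod's DELETE: everything still mounted for UID eb2ffcab-... is torn down because nothing desires it any more. A toy Go sketch of that diffing loop, under the assumption that volumes can be modeled as plain string sets (the real reconciler tracks much richer state):

// reconcile_sketch.go: toy desired-vs-actual volume reconcile of the kind
// that produces the UnmountVolume / MountVolume lines in this log.
package main

import "fmt"

func reconcile(desired, actual map[string]bool) {
	// Mounted but no longer desired: unmount (run-httpd, scripts, ... above).
	for vol := range actual {
		if !desired[vol] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
		}
	}
	// Desired but not yet mounted: mount.
	for vol := range desired {
		if !actual[vol] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
		}
	}
}

func main() {
	actual := map[string]bool{"run-httpd": true, "scripts": true, "config-data": true}
	desired := map[string]bool{} // pod deleted: nothing is desired any more
	reconcile(desired, actual)
}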
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.785754 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.785959 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.794994 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795166 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795336 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795392 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795447 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795516 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r95x5\" (UniqueName: \"kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.795822 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.800600 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.897927 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898026 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898110 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898199 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898244 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898277 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.898388 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r95x5\" (UniqueName: \"kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.899228 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.899441 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.904933 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.905195 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.905214 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.913924 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:23 crc kubenswrapper[4784]: I0106 08:36:23.919392 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r95x5\" (UniqueName: \"kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5\") pod \"ceilometer-0\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " pod="openstack/ceilometer-0"
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.108728 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.330022 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb2ffcab-ac3a-45e3-8acf-a4604be90444" path="/var/lib/kubelet/pods/eb2ffcab-ac3a-45e3-8acf-a4604be90444/volumes"
Jan 06 08:36:24 crc kubenswrapper[4784]: W0106 08:36:24.630453 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod057af17f_86ff_476f_af9c_6efa26be4a78.slice/crio-33122c7976b71953d16c6a62e84489d69d0e82fceac777319e01e873eb88067f WatchSource:0}: Error finding container 33122c7976b71953d16c6a62e84489d69d0e82fceac777319e01e873eb88067f: Status 404 returned error can't find the container with id 33122c7976b71953d16c6a62e84489d69d0e82fceac777319e01e873eb88067f
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.633122 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.701609 4784 generic.go:334] "Generic (PLEG): container finished" podID="5dce1cc6-db1c-4b61-adfe-a20a3751aad3" containerID="00bd19712a8124651f5eae66da5fb56d3422ce328886923af0f40f521d7f84fd" exitCode=0
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.701680 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" event={"ID":"5dce1cc6-db1c-4b61-adfe-a20a3751aad3","Type":"ContainerDied","Data":"00bd19712a8124651f5eae66da5fb56d3422ce328886923af0f40f521d7f84fd"}
Jan 06 08:36:24 crc kubenswrapper[4784]: I0106 08:36:24.704227 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerStarted","Data":"33122c7976b71953d16c6a62e84489d69d0e82fceac777319e01e873eb88067f"}
Jan 06 08:36:25 crc kubenswrapper[4784]: I0106 08:36:25.719283 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerStarted","Data":"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"}
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.144440 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p6pg6"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.248778 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqgd7\" (UniqueName: \"kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7\") pod \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") "
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.248818 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data\") pod \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") "
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.248881 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle\") pod \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") "
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.249042 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts\") pod \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\" (UID: \"5dce1cc6-db1c-4b61-adfe-a20a3751aad3\") "
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.254898 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7" (OuterVolumeSpecName: "kube-api-access-nqgd7") pod "5dce1cc6-db1c-4b61-adfe-a20a3751aad3" (UID: "5dce1cc6-db1c-4b61-adfe-a20a3751aad3"). InnerVolumeSpecName "kube-api-access-nqgd7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.261621 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts" (OuterVolumeSpecName: "scripts") pod "5dce1cc6-db1c-4b61-adfe-a20a3751aad3" (UID: "5dce1cc6-db1c-4b61-adfe-a20a3751aad3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.277291 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data" (OuterVolumeSpecName: "config-data") pod "5dce1cc6-db1c-4b61-adfe-a20a3751aad3" (UID: "5dce1cc6-db1c-4b61-adfe-a20a3751aad3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.284826 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dce1cc6-db1c-4b61-adfe-a20a3751aad3" (UID: "5dce1cc6-db1c-4b61-adfe-a20a3751aad3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.359379 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.359440 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqgd7\" (UniqueName: \"kubernetes.io/projected/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-kube-api-access-nqgd7\") on node \"crc\" DevicePath \"\""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.359457 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.359471 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dce1cc6-db1c-4b61-adfe-a20a3751aad3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.731098 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerStarted","Data":"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"}
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.732647 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p6pg6" event={"ID":"5dce1cc6-db1c-4b61-adfe-a20a3751aad3","Type":"ContainerDied","Data":"beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218"}
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.732676 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="beb22903755af41bd38a6994eacc03ff93008feb1d1085eb4d332494a6570218"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.732759 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p6pg6"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.956041 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 06 08:36:26 crc kubenswrapper[4784]: E0106 08:36:26.957156 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dce1cc6-db1c-4b61-adfe-a20a3751aad3" containerName="nova-cell0-conductor-db-sync"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.957183 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dce1cc6-db1c-4b61-adfe-a20a3751aad3" containerName="nova-cell0-conductor-db-sync"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.957403 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dce1cc6-db1c-4b61-adfe-a20a3751aad3" containerName="nova-cell0-conductor-db-sync"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.958156 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.985892 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.986332 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vqhzt"
Jan 06 08:36:26 crc kubenswrapper[4784]: I0106 08:36:26.999781 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.074855 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.074927 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w592f\" (UniqueName: \"kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.074995 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.176241 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.176403 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.176462 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w592f\" (UniqueName: \"kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.184498 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.195164 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.197971 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w592f\" (UniqueName: \"kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f\") pod \"nova-cell0-conductor-0\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.382536 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.744356 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerStarted","Data":"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"}
Jan 06 08:36:27 crc kubenswrapper[4784]: I0106 08:36:27.859941 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.765272 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4dd5733d-6502-4030-a012-be296b7d11c1","Type":"ContainerStarted","Data":"c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5"}
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.765933 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4dd5733d-6502-4030-a012-be296b7d11c1","Type":"ContainerStarted","Data":"26ae5fd9dd796b58a427fbb8e745aaa9de4b2972ab006a63b81014975f2904d4"}
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.765990 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.770775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerStarted","Data":"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"}
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.771112 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.792785 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.792756094 podStartE2EDuration="2.792756094s" podCreationTimestamp="2026-01-06 08:36:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:28.782203314 +0000 UTC m=+1290.828376151" watchObservedRunningTime="2026-01-06 08:36:28.792756094 +0000 UTC m=+1290.838928931"
Jan 06 08:36:28 crc kubenswrapper[4784]: I0106 08:36:28.831446 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.146871496 podStartE2EDuration="5.831420398s" podCreationTimestamp="2026-01-06 08:36:23 +0000 UTC" firstStartedPulling="2026-01-06 08:36:24.634101888 +0000 UTC m=+1286.680274755" lastFinishedPulling="2026-01-06 08:36:28.31865082 +0000 UTC m=+1290.364823657" observedRunningTime="2026-01-06 08:36:28.819506066 +0000 UTC m=+1290.865678923" watchObservedRunningTime="2026-01-06 08:36:28.831420398 +0000 UTC m=+1290.877593235"
Jan 06 08:36:37 crc kubenswrapper[4784]: I0106 08:36:37.414320 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.053267 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-cwjjl"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.054967 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.056764 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.057467 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.067717 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-cwjjl"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.204811 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.204886 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.205318 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.205365 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmm6k\" (UniqueName: \"kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.238342 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.240468 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.245819 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.255066 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.257120 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.261280 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.280855 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.296212 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.307107 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.307163 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmm6k\" (UniqueName: \"kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.307219 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.307242 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.317474 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.319786 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.320482 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.368467 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmm6k\" (UniqueName: \"kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k\") pod \"nova-cell0-cell-mapping-cwjjl\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.401381 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cwjjl"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.417305 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.417384 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.417453 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.438756 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxrkz\" (UniqueName: \"kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.438845 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zlmc\" (UniqueName: \"kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.438932 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.439120 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.542383 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.542897 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.542967 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.543056 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.543077 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxrkz\" (UniqueName: \"kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.543094 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zlmc\" (UniqueName: \"kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.543132 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.545104 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.550465 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.559332 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.559911 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0"
Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.566300 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0"
pod="openstack/nova-scheduler-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.609257 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.610665 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.611116 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxrkz\" (UniqueName: \"kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz\") pod \"nova-scheduler-0\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.624157 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.643112 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zlmc\" (UniqueName: \"kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc\") pod \"nova-api-0\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " pod="openstack/nova-api-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.677954 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.680281 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.695149 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.708067 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.729645 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.750996 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.751398 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.751533 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzg2f\" (UniqueName: \"kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853167 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853249 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853291 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzg2f\" (UniqueName: \"kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853316 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853332 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t7vv\" (UniqueName: \"kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853352 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.853392 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.866353 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.869065 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.885427 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.886470 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.891489 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.902350 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzg2f\" (UniqueName: \"kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.904880 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.928856 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.954662 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.956266 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.956303 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t7vv\" (UniqueName: \"kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.956323 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.956446 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.957415 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.973499 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:38 crc kubenswrapper[4784]: I0106 08:36:38.979873 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data\") pod \"nova-metadata-0\" (UID: 
\"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.019555 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t7vv\" (UniqueName: \"kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv\") pod \"nova-metadata-0\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") " pod="openstack/nova-metadata-0" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.058745 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlxx9\" (UniqueName: \"kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.058841 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.058938 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.059022 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.059041 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.059084 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160647 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160691 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" 
(UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160733 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160779 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlxx9\" (UniqueName: \"kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160801 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.160871 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.161868 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.162373 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.162589 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.162989 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.163307 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 
08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.193337 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlxx9\" (UniqueName: \"kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9\") pod \"dnsmasq-dns-5bfb54f9b5-k245g\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.289044 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.306871 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.377098 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-cwjjl"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.577367 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.760269 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.786157 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2wpc6"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.790807 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.797504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.797608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6g7c\" (UniqueName: \"kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.797636 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.797856 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.798509 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.799218 4784 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"nova-cell1-conductor-config-data" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.836352 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2wpc6"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.847080 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.905688 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.905757 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6g7c\" (UniqueName: \"kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.905781 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.905889 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.915078 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.918222 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.926399 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6g7c\" (UniqueName: \"kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.928995 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2wpc6\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:39 
crc kubenswrapper[4784]: I0106 08:36:39.981778 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cwjjl" event={"ID":"a7af75ef-c428-4d9b-8887-4576bc478e80","Type":"ContainerStarted","Data":"51c6be1566d7abfb917cf987fce43a6501285d641013f5c5b50a40999bed6c52"} Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.981861 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cwjjl" event={"ID":"a7af75ef-c428-4d9b-8887-4576bc478e80","Type":"ContainerStarted","Data":"8cc10eb9253327800b5660756a629e47d081af511cae42bbecd62ef4143fb653"} Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.986224 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"470ef208-e9ff-49ee-ae66-212a38542ab8","Type":"ContainerStarted","Data":"f69a8ac89a58115986b0639a88c1deaf1080ebfde40372ca7ff446f57760ff62"} Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.987634 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"025631c8-6dc9-4fc3-8492-5a9ce369b79d","Type":"ContainerStarted","Data":"9075361af5259259bca6d4a0b274d8a826d9f16793abb49e73bf258d66e66c8e"} Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.989133 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerStarted","Data":"6ae81a1144a48e238d1a8173d5ad5bb5848a3bb53911b4def20f016e321f85be"} Jan 06 08:36:39 crc kubenswrapper[4784]: I0106 08:36:39.993000 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:36:40 crc kubenswrapper[4784]: I0106 08:36:40.007898 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-cwjjl" podStartSLOduration=2.00787008 podStartE2EDuration="2.00787008s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:39.997816006 +0000 UTC m=+1302.043988853" watchObservedRunningTime="2026-01-06 08:36:40.00787008 +0000 UTC m=+1302.054042917" Jan 06 08:36:40 crc kubenswrapper[4784]: I0106 08:36:40.142445 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:40 crc kubenswrapper[4784]: I0106 08:36:40.168376 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:36:40 crc kubenswrapper[4784]: I0106 08:36:40.725247 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2wpc6"] Jan 06 08:36:41 crc kubenswrapper[4784]: I0106 08:36:41.009649 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerStarted","Data":"f7a8af4261e72c14122437e284dfa9efe90bd8efa3270ecde2324551f80d6c14"} Jan 06 08:36:41 crc kubenswrapper[4784]: I0106 08:36:41.013229 4784 generic.go:334] "Generic (PLEG): container finished" podID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerID="e3ec423b01120c4bc4f3d63760694fc3257341409d7c3c6a6974d5e14e19ff27" exitCode=0 Jan 06 08:36:41 crc kubenswrapper[4784]: I0106 08:36:41.013413 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" event={"ID":"97759d02-ee09-4c0a-bc00-a6a940f45fc5","Type":"ContainerDied","Data":"e3ec423b01120c4bc4f3d63760694fc3257341409d7c3c6a6974d5e14e19ff27"} Jan 06 08:36:41 crc kubenswrapper[4784]: I0106 08:36:41.013477 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" event={"ID":"97759d02-ee09-4c0a-bc00-a6a940f45fc5","Type":"ContainerStarted","Data":"10fb19ffae5e16f590263e2d00076e71dc8b1aa0afc995751b37ba9c0a521ad2"} Jan 06 08:36:41 crc kubenswrapper[4784]: I0106 08:36:41.025973 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" event={"ID":"0c4c22ba-056d-49c3-94ba-a9847f419943","Type":"ContainerStarted","Data":"36f923ec92bd214e7bead992ff7352d75917245b230818bf04fd8666f5370beb"} Jan 06 08:36:42 crc kubenswrapper[4784]: I0106 08:36:42.040160 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" event={"ID":"97759d02-ee09-4c0a-bc00-a6a940f45fc5","Type":"ContainerStarted","Data":"ef4ed89dc708181ffc925d7e028fb5062c84cfa88d18f20a58a06f6a86020e18"} Jan 06 08:36:42 crc kubenswrapper[4784]: I0106 08:36:42.040586 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:42 crc kubenswrapper[4784]: I0106 08:36:42.041936 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" event={"ID":"0c4c22ba-056d-49c3-94ba-a9847f419943","Type":"ContainerStarted","Data":"dbb791de4205d4d85966f3bf1f337e666a95a1d84c1e47b53f6433801fbb0b76"} Jan 06 08:36:42 crc kubenswrapper[4784]: I0106 08:36:42.076393 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" podStartSLOduration=4.076343329 podStartE2EDuration="4.076343329s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:42.067466213 +0000 UTC m=+1304.113639050" watchObservedRunningTime="2026-01-06 08:36:42.076343329 +0000 UTC m=+1304.122516166" Jan 06 08:36:42 crc kubenswrapper[4784]: I0106 08:36:42.116337 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" podStartSLOduration=3.116303443 podStartE2EDuration="3.116303443s" 
podCreationTimestamp="2026-01-06 08:36:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:42.098619963 +0000 UTC m=+1304.144792820" watchObservedRunningTime="2026-01-06 08:36:42.116303443 +0000 UTC m=+1304.162476290" Jan 06 08:36:43 crc kubenswrapper[4784]: I0106 08:36:43.095526 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:36:43 crc kubenswrapper[4784]: I0106 08:36:43.180848 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.082847 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerStarted","Data":"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.083740 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerStarted","Data":"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.083454 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-metadata" containerID="cri-o://608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8" gracePeriod=30 Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.082932 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-log" containerID="cri-o://665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9" gracePeriod=30 Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.095160 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"470ef208-e9ff-49ee-ae66-212a38542ab8","Type":"ContainerStarted","Data":"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.095324 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="470ef208-e9ff-49ee-ae66-212a38542ab8" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3" gracePeriod=30 Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.103869 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"025631c8-6dc9-4fc3-8492-5a9ce369b79d","Type":"ContainerStarted","Data":"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.107788 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.1299128290000002 podStartE2EDuration="7.107769585s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="2026-01-06 08:36:40.185263823 +0000 UTC m=+1302.231436660" lastFinishedPulling="2026-01-06 08:36:44.163120579 +0000 UTC m=+1306.209293416" observedRunningTime="2026-01-06 08:36:45.104934276 +0000 UTC m=+1307.151107113" watchObservedRunningTime="2026-01-06 08:36:45.107769585 +0000 UTC m=+1307.153942422" 
Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.124513 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerStarted","Data":"ecf83e1238473049fa9df0f598a521008a4c7dc6b181e9eaa22fa538a42a06a8"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.124616 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerStarted","Data":"73a2d7251a65406d6311221b1b3d82d4d2fe27e88330dcb6a69286e36feb9424"} Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.143242 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.779165485 podStartE2EDuration="7.143218978s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="2026-01-06 08:36:39.79821663 +0000 UTC m=+1301.844389467" lastFinishedPulling="2026-01-06 08:36:44.162270123 +0000 UTC m=+1306.208442960" observedRunningTime="2026-01-06 08:36:45.131456602 +0000 UTC m=+1307.177629439" watchObservedRunningTime="2026-01-06 08:36:45.143218978 +0000 UTC m=+1307.189391815" Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.154508 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.7637072849999997 podStartE2EDuration="7.154488629s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="2026-01-06 08:36:39.774625326 +0000 UTC m=+1301.820798163" lastFinishedPulling="2026-01-06 08:36:44.16540667 +0000 UTC m=+1306.211579507" observedRunningTime="2026-01-06 08:36:45.148221014 +0000 UTC m=+1307.194393851" watchObservedRunningTime="2026-01-06 08:36:45.154488629 +0000 UTC m=+1307.200661466" Jan 06 08:36:45 crc kubenswrapper[4784]: I0106 08:36:45.175475 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.630221549 podStartE2EDuration="7.175456383s" podCreationTimestamp="2026-01-06 08:36:38 +0000 UTC" firstStartedPulling="2026-01-06 08:36:39.619770704 +0000 UTC m=+1301.665943541" lastFinishedPulling="2026-01-06 08:36:44.165005538 +0000 UTC m=+1306.211178375" observedRunningTime="2026-01-06 08:36:45.172271003 +0000 UTC m=+1307.218443830" watchObservedRunningTime="2026-01-06 08:36:45.175456383 +0000 UTC m=+1307.221629220" Jan 06 08:36:46 crc kubenswrapper[4784]: I0106 08:36:46.137530 4784 generic.go:334] "Generic (PLEG): container finished" podID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerID="665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9" exitCode=143 Jan 06 08:36:46 crc kubenswrapper[4784]: I0106 08:36:46.137667 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerDied","Data":"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"} Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 08:36:48.887994 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 08:36:48.888382 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 08:36:48.906499 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 
08:36:48.906554 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 08:36:48.922349 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 06 08:36:48 crc kubenswrapper[4784]: I0106 08:36:48.955670 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.173241 4784 generic.go:334] "Generic (PLEG): container finished" podID="a7af75ef-c428-4d9b-8887-4576bc478e80" containerID="51c6be1566d7abfb917cf987fce43a6501285d641013f5c5b50a40999bed6c52" exitCode=0 Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.173636 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cwjjl" event={"ID":"a7af75ef-c428-4d9b-8887-4576bc478e80","Type":"ContainerDied","Data":"51c6be1566d7abfb917cf987fce43a6501285d641013f5c5b50a40999bed6c52"} Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.231019 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.290763 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.307357 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.307402 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.368062 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.368375 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="dnsmasq-dns" containerID="cri-o://f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796" gracePeriod=10 Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.925760 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.990918 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 06 08:36:49 crc kubenswrapper[4784]: I0106 08:36:49.990947 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.045875 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.045973 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcjdb\" (UniqueName: \"kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.046043 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.046127 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.046253 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.046297 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc\") pod \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\" (UID: \"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.070206 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb" (OuterVolumeSpecName: "kube-api-access-xcjdb") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "kube-api-access-xcjdb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.122699 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.125481 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.126558 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.142586 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config" (OuterVolumeSpecName: "config") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.143705 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" (UID: "1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149533 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149614 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcjdb\" (UniqueName: \"kubernetes.io/projected/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-kube-api-access-xcjdb\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149629 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149640 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149652 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.149661 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.199491 4784 generic.go:334] "Generic (PLEG): container finished" podID="0c4c22ba-056d-49c3-94ba-a9847f419943" containerID="dbb791de4205d4d85966f3bf1f337e666a95a1d84c1e47b53f6433801fbb0b76" exitCode=0 Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.199595 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" event={"ID":"0c4c22ba-056d-49c3-94ba-a9847f419943","Type":"ContainerDied","Data":"dbb791de4205d4d85966f3bf1f337e666a95a1d84c1e47b53f6433801fbb0b76"} Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.207410 4784 generic.go:334] "Generic (PLEG): container finished" podID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerID="f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796" exitCode=0 Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.207601 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.207634 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" event={"ID":"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2","Type":"ContainerDied","Data":"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796"} Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.210556 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" event={"ID":"1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2","Type":"ContainerDied","Data":"646ef0c4094671fdb606fdbf7e7547f6bea328fb7b9b12a85f9afe2c8e3769b9"} Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.210594 4784 scope.go:117] "RemoveContainer" containerID="f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.256291 4784 scope.go:117] "RemoveContainer" containerID="0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.273734 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.284918 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-8fpxg"] Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.286495 4784 scope.go:117] "RemoveContainer" containerID="f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796" Jan 06 08:36:50 crc kubenswrapper[4784]: E0106 08:36:50.288132 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796\": container with ID starting with f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796 not found: ID does not exist" containerID="f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.288230 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796"} err="failed to get container status \"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796\": rpc error: code = NotFound desc = could not find container \"f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796\": container with ID starting with f90613b7d982b1882c5bb14f3d0894b2d2113c8fdce7e30d214f8f535fda3796 not found: ID does not exist" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.288259 4784 scope.go:117] "RemoveContainer" containerID="0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f" Jan 06 08:36:50 crc kubenswrapper[4784]: E0106 08:36:50.288687 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f\": container with ID starting with 0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f not found: ID does not exist" containerID="0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.288751 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f"} err="failed to get container status 
\"0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f\": rpc error: code = NotFound desc = could not find container \"0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f\": container with ID starting with 0f2565613a2e2471a486b91ac944082cd1665104e6b52d3fabe835597650894f not found: ID does not exist" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.336672 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" path="/var/lib/kubelet/pods/1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2/volumes" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.573528 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cwjjl" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.659478 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmm6k\" (UniqueName: \"kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k\") pod \"a7af75ef-c428-4d9b-8887-4576bc478e80\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.659619 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data\") pod \"a7af75ef-c428-4d9b-8887-4576bc478e80\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.659701 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle\") pod \"a7af75ef-c428-4d9b-8887-4576bc478e80\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.659965 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts\") pod \"a7af75ef-c428-4d9b-8887-4576bc478e80\" (UID: \"a7af75ef-c428-4d9b-8887-4576bc478e80\") " Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.664330 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k" (OuterVolumeSpecName: "kube-api-access-nmm6k") pod "a7af75ef-c428-4d9b-8887-4576bc478e80" (UID: "a7af75ef-c428-4d9b-8887-4576bc478e80"). InnerVolumeSpecName "kube-api-access-nmm6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.666863 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts" (OuterVolumeSpecName: "scripts") pod "a7af75ef-c428-4d9b-8887-4576bc478e80" (UID: "a7af75ef-c428-4d9b-8887-4576bc478e80"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.690613 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7af75ef-c428-4d9b-8887-4576bc478e80" (UID: "a7af75ef-c428-4d9b-8887-4576bc478e80"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.713711 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data" (OuterVolumeSpecName: "config-data") pod "a7af75ef-c428-4d9b-8887-4576bc478e80" (UID: "a7af75ef-c428-4d9b-8887-4576bc478e80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.763203 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.763244 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmm6k\" (UniqueName: \"kubernetes.io/projected/a7af75ef-c428-4d9b-8887-4576bc478e80-kube-api-access-nmm6k\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.763256 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:50 crc kubenswrapper[4784]: I0106 08:36:50.763267 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7af75ef-c428-4d9b-8887-4576bc478e80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.261978 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cwjjl" event={"ID":"a7af75ef-c428-4d9b-8887-4576bc478e80","Type":"ContainerDied","Data":"8cc10eb9253327800b5660756a629e47d081af511cae42bbecd62ef4143fb653"} Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.262034 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cc10eb9253327800b5660756a629e47d081af511cae42bbecd62ef4143fb653" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.262125 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cwjjl" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.353296 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.353868 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-log" containerID="cri-o://73a2d7251a65406d6311221b1b3d82d4d2fe27e88330dcb6a69286e36feb9424" gracePeriod=30 Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.354534 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-api" containerID="cri-o://ecf83e1238473049fa9df0f598a521008a4c7dc6b181e9eaa22fa538a42a06a8" gracePeriod=30 Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.363255 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.363486 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" containerName="nova-scheduler-scheduler" containerID="cri-o://121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72" gracePeriod=30 Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.700406 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.789693 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts\") pod \"0c4c22ba-056d-49c3-94ba-a9847f419943\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.789928 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data\") pod \"0c4c22ba-056d-49c3-94ba-a9847f419943\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.790044 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle\") pod \"0c4c22ba-056d-49c3-94ba-a9847f419943\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.790111 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6g7c\" (UniqueName: \"kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c\") pod \"0c4c22ba-056d-49c3-94ba-a9847f419943\" (UID: \"0c4c22ba-056d-49c3-94ba-a9847f419943\") " Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.816187 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c" (OuterVolumeSpecName: "kube-api-access-c6g7c") pod "0c4c22ba-056d-49c3-94ba-a9847f419943" (UID: "0c4c22ba-056d-49c3-94ba-a9847f419943"). InnerVolumeSpecName "kube-api-access-c6g7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.827110 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts" (OuterVolumeSpecName: "scripts") pod "0c4c22ba-056d-49c3-94ba-a9847f419943" (UID: "0c4c22ba-056d-49c3-94ba-a9847f419943"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.841109 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c4c22ba-056d-49c3-94ba-a9847f419943" (UID: "0c4c22ba-056d-49c3-94ba-a9847f419943"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.863168 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data" (OuterVolumeSpecName: "config-data") pod "0c4c22ba-056d-49c3-94ba-a9847f419943" (UID: "0c4c22ba-056d-49c3-94ba-a9847f419943"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.899201 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.899257 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6g7c\" (UniqueName: \"kubernetes.io/projected/0c4c22ba-056d-49c3-94ba-a9847f419943-kube-api-access-c6g7c\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.899273 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:51 crc kubenswrapper[4784]: I0106 08:36:51.899284 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c4c22ba-056d-49c3-94ba-a9847f419943-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.289417 4784 generic.go:334] "Generic (PLEG): container finished" podID="4c811477-8a61-494d-88ce-3642c7becc92" containerID="73a2d7251a65406d6311221b1b3d82d4d2fe27e88330dcb6a69286e36feb9424" exitCode=143 Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.290000 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerDied","Data":"73a2d7251a65406d6311221b1b3d82d4d2fe27e88330dcb6a69286e36feb9424"} Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.293523 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" event={"ID":"0c4c22ba-056d-49c3-94ba-a9847f419943","Type":"ContainerDied","Data":"36f923ec92bd214e7bead992ff7352d75917245b230818bf04fd8666f5370beb"} Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.293595 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36f923ec92bd214e7bead992ff7352d75917245b230818bf04fd8666f5370beb" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 
08:36:52.293718 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2wpc6" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.362801 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:36:52 crc kubenswrapper[4784]: E0106 08:36:52.363370 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="dnsmasq-dns" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363392 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="dnsmasq-dns" Jan 06 08:36:52 crc kubenswrapper[4784]: E0106 08:36:52.363425 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c4c22ba-056d-49c3-94ba-a9847f419943" containerName="nova-cell1-conductor-db-sync" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363433 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c4c22ba-056d-49c3-94ba-a9847f419943" containerName="nova-cell1-conductor-db-sync" Jan 06 08:36:52 crc kubenswrapper[4784]: E0106 08:36:52.363445 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="init" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363451 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="init" Jan 06 08:36:52 crc kubenswrapper[4784]: E0106 08:36:52.363461 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7af75ef-c428-4d9b-8887-4576bc478e80" containerName="nova-manage" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363467 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7af75ef-c428-4d9b-8887-4576bc478e80" containerName="nova-manage" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363693 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c4c22ba-056d-49c3-94ba-a9847f419943" containerName="nova-cell1-conductor-db-sync" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363711 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="dnsmasq-dns" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.363722 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7af75ef-c428-4d9b-8887-4576bc478e80" containerName="nova-manage" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.364500 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.367263 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.381531 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.513569 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.513652 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2vf7\" (UniqueName: \"kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.513722 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.615673 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.615741 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2vf7\" (UniqueName: \"kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.615789 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.623473 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.624097 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.634430 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2vf7\" (UniqueName: \"kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7\") pod \"nova-cell1-conductor-0\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") " pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.770519 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:52 crc kubenswrapper[4784]: I0106 08:36:52.916571 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.023730 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data\") pod \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.023988 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxrkz\" (UniqueName: \"kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz\") pod \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.024102 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle\") pod \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\" (UID: \"025631c8-6dc9-4fc3-8492-5a9ce369b79d\") " Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.030905 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz" (OuterVolumeSpecName: "kube-api-access-jxrkz") pod "025631c8-6dc9-4fc3-8492-5a9ce369b79d" (UID: "025631c8-6dc9-4fc3-8492-5a9ce369b79d"). InnerVolumeSpecName "kube-api-access-jxrkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.057899 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "025631c8-6dc9-4fc3-8492-5a9ce369b79d" (UID: "025631c8-6dc9-4fc3-8492-5a9ce369b79d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.063965 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data" (OuterVolumeSpecName: "config-data") pod "025631c8-6dc9-4fc3-8492-5a9ce369b79d" (UID: "025631c8-6dc9-4fc3-8492-5a9ce369b79d"). InnerVolumeSpecName "config-data". 
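The VerifyControllerAttachedVolume -> MountVolume started -> MountVolume.SetUp succeeded progression above is the volume reconciler walking each volume of nova-cell1-conductor-0 through attach and mount. A hypothetical sketch of the volume stanza behind it, using only names visible in the records (the config-data Secret name comes from the reflector line; the combined-ca-bundle Secret name is an assumption), while kube-api-access-m2vf7 is the automatically injected projected service-account token volume:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vols := []corev1.Volume{
		{Name: "config-data", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "nova-cell1-conductor-config-data"},
		}},
		{Name: "combined-ca-bundle", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "combined-ca-bundle"}, // assumed Secret name
		}},
	}
	for _, v := range vols {
		fmt.Printf("%s -> secret/%s\n", v.Name, v.VolumeSource.Secret.SecretName)
	}
}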
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.127161 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.127219 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/025631c8-6dc9-4fc3-8492-5a9ce369b79d-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.127229 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxrkz\" (UniqueName: \"kubernetes.io/projected/025631c8-6dc9-4fc3-8492-5a9ce369b79d-kube-api-access-jxrkz\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.252873 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.305360 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f153c14-0bd9-4c9f-a8fc-c54c80722bce","Type":"ContainerStarted","Data":"33daef59f535476e9244293a33ebbf460367627addd402a1197c073a7d2862bc"} Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.308234 4784 generic.go:334] "Generic (PLEG): container finished" podID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" containerID="121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72" exitCode=0 Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.308281 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"025631c8-6dc9-4fc3-8492-5a9ce369b79d","Type":"ContainerDied","Data":"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72"} Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.308299 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"025631c8-6dc9-4fc3-8492-5a9ce369b79d","Type":"ContainerDied","Data":"9075361af5259259bca6d4a0b274d8a826d9f16793abb49e73bf258d66e66c8e"} Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.308317 4784 scope.go:117] "RemoveContainer" containerID="121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.308458 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.370836 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.377707 4784 scope.go:117] "RemoveContainer" containerID="121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72" Jan 06 08:36:53 crc kubenswrapper[4784]: E0106 08:36:53.378668 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72\": container with ID starting with 121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72 not found: ID does not exist" containerID="121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.378729 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72"} err="failed to get container status \"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72\": rpc error: code = NotFound desc = could not find container \"121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72\": container with ID starting with 121ed1710befd949b8308bca40e7bfd2cffea4d22200eb82a1dbe2ff0c3baa72 not found: ID does not exist" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.386338 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.407242 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:53 crc kubenswrapper[4784]: E0106 08:36:53.408079 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" containerName="nova-scheduler-scheduler" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.408115 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" containerName="nova-scheduler-scheduler" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.408503 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" containerName="nova-scheduler-scheduler" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.409784 4784 util.go:30] "No sandbox for pod can be found. 
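The "ContainerStatus from runtime service failed ... NotFound" pair above is benign: the container was already removed by the time the status lookup ran. A hypothetical sketch of the idempotent-cleanup pattern it reflects, where a gRPC NotFound from a CRI-style call is treated as already-gone rather than as a failure:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// containerStatus stands in for a real CRI ContainerStatus RPC.
func containerStatus(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := containerStatus("121ed1710befd949b830")
	if status.Code(err) == codes.NotFound {
		fmt.Println("container already removed; nothing to do")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("container still present")
}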
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.416152 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.432421 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.539608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.540015 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2kl2\" (UniqueName: \"kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.540241 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.642767 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2kl2\" (UniqueName: \"kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.642961 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.643273 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.651804 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.652989 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.667188 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2kl2\" (UniqueName: 
\"kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2\") pod \"nova-scheduler-0\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " pod="openstack/nova-scheduler-0" Jan 06 08:36:53 crc kubenswrapper[4784]: I0106 08:36:53.736704 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.120255 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.232746 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:36:54 crc kubenswrapper[4784]: W0106 08:36:54.242176 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e978fb0_450f_4c11_bdb8_6bc6b181d471.slice/crio-ad89c70e762f72acc71fcc401f4abbb112b32d6cbcc122b5fac6b1341f1b2de9 WatchSource:0}: Error finding container ad89c70e762f72acc71fcc401f4abbb112b32d6cbcc122b5fac6b1341f1b2de9: Status 404 returned error can't find the container with id ad89c70e762f72acc71fcc401f4abbb112b32d6cbcc122b5fac6b1341f1b2de9 Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.355424 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="025631c8-6dc9-4fc3-8492-5a9ce369b79d" path="/var/lib/kubelet/pods/025631c8-6dc9-4fc3-8492-5a9ce369b79d/volumes" Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.356174 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f153c14-0bd9-4c9f-a8fc-c54c80722bce","Type":"ContainerStarted","Data":"286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65"} Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.356211 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0e978fb0-450f-4c11-bdb8-6bc6b181d471","Type":"ContainerStarted","Data":"ad89c70e762f72acc71fcc401f4abbb112b32d6cbcc122b5fac6b1341f1b2de9"} Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.356233 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.383132 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.383102518 podStartE2EDuration="2.383102518s" podCreationTimestamp="2026-01-06 08:36:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:54.369408761 +0000 UTC m=+1316.415581588" watchObservedRunningTime="2026-01-06 08:36:54.383102518 +0000 UTC m=+1316.429275355" Jan 06 08:36:54 crc kubenswrapper[4784]: I0106 08:36:54.904666 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6b4f5fc4f-8fpxg" podUID="1a76b6a5-83ce-4ded-a5c5-6bdbd80bcfe2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.162:5353: i/o timeout" Jan 06 08:36:55 crc kubenswrapper[4784]: I0106 08:36:55.374647 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0e978fb0-450f-4c11-bdb8-6bc6b181d471","Type":"ContainerStarted","Data":"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c"} Jan 06 08:36:55 crc kubenswrapper[4784]: I0106 08:36:55.418085 4784 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.418027334 podStartE2EDuration="2.418027334s" podCreationTimestamp="2026-01-06 08:36:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:55.400840408 +0000 UTC m=+1317.447013285" watchObservedRunningTime="2026-01-06 08:36:55.418027334 +0000 UTC m=+1317.464200181" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.388731 4784 generic.go:334] "Generic (PLEG): container finished" podID="4c811477-8a61-494d-88ce-3642c7becc92" containerID="ecf83e1238473049fa9df0f598a521008a4c7dc6b181e9eaa22fa538a42a06a8" exitCode=0 Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.388840 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerDied","Data":"ecf83e1238473049fa9df0f598a521008a4c7dc6b181e9eaa22fa538a42a06a8"} Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.389687 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4c811477-8a61-494d-88ce-3642c7becc92","Type":"ContainerDied","Data":"6ae81a1144a48e238d1a8173d5ad5bb5848a3bb53911b4def20f016e321f85be"} Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.389742 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ae81a1144a48e238d1a8173d5ad5bb5848a3bb53911b4def20f016e321f85be" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.465107 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.622169 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle\") pod \"4c811477-8a61-494d-88ce-3642c7becc92\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.622327 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs\") pod \"4c811477-8a61-494d-88ce-3642c7becc92\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.622385 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data\") pod \"4c811477-8a61-494d-88ce-3642c7becc92\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.622513 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zlmc\" (UniqueName: \"kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc\") pod \"4c811477-8a61-494d-88ce-3642c7becc92\" (UID: \"4c811477-8a61-494d-88ce-3642c7becc92\") " Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.623810 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs" (OuterVolumeSpecName: "logs") pod "4c811477-8a61-494d-88ce-3642c7becc92" (UID: "4c811477-8a61-494d-88ce-3642c7becc92"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.632520 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc" (OuterVolumeSpecName: "kube-api-access-2zlmc") pod "4c811477-8a61-494d-88ce-3642c7becc92" (UID: "4c811477-8a61-494d-88ce-3642c7becc92"). InnerVolumeSpecName "kube-api-access-2zlmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.664263 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c811477-8a61-494d-88ce-3642c7becc92" (UID: "4c811477-8a61-494d-88ce-3642c7becc92"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.676351 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data" (OuterVolumeSpecName: "config-data") pod "4c811477-8a61-494d-88ce-3642c7becc92" (UID: "4c811477-8a61-494d-88ce-3642c7becc92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.725626 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.725675 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c811477-8a61-494d-88ce-3642c7becc92-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.725689 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c811477-8a61-494d-88ce-3642c7becc92-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:56 crc kubenswrapper[4784]: I0106 08:36:56.725703 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zlmc\" (UniqueName: \"kubernetes.io/projected/4c811477-8a61-494d-88ce-3642c7becc92-kube-api-access-2zlmc\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.404264 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.442877 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.464049 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.473313 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:57 crc kubenswrapper[4784]: E0106 08:36:57.473785 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-log" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.473803 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-log" Jan 06 08:36:57 crc kubenswrapper[4784]: E0106 08:36:57.473840 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-api" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.473848 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-api" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.474027 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-api" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.474048 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c811477-8a61-494d-88ce-3642c7becc92" containerName="nova-api-log" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.477291 4784 util.go:30] "No sandbox for pod can be found. 
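The SyncLoop ADD/UPDATE/DELETE/REMOVE records with source="api" are the kubelet consuming a pod watch from the API server. A minimal client-go sketch of the same kind of event stream, assuming a reachable kubeconfig; the kubelet itself additionally filters to pods scheduled to its node:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	w, err := client.CoreV1().Pods("openstack").Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type) // ADDED / MODIFIED / DELETED
	}
}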
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.479935 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.486650 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.646243 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jq7d\" (UniqueName: \"kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.646302 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.646687 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.647333 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.749247 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.749322 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jq7d\" (UniqueName: \"kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.749354 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.749415 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.750243 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " 
pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.754523 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.754536 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.768236 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jq7d\" (UniqueName: \"kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d\") pod \"nova-api-0\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " pod="openstack/nova-api-0" Jan 06 08:36:57 crc kubenswrapper[4784]: I0106 08:36:57.820286 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.089159 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.096828 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" containerName="kube-state-metrics" containerID="cri-o://45d05df335bdaf2ee6d1081eceeb0fc66d2d5184d9c6e77356e4a0e16e487c5d" gracePeriod=30 Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.367093 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c811477-8a61-494d-88ce-3642c7becc92" path="/var/lib/kubelet/pods/4c811477-8a61-494d-88ce-3642c7becc92/volumes" Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.381560 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.417123 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerStarted","Data":"a139f3ad1e7de2711bdd12ce7d054744232ee6dbacb833fa5917bc0997e3cf02"} Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.419767 4784 generic.go:334] "Generic (PLEG): container finished" podID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" containerID="45d05df335bdaf2ee6d1081eceeb0fc66d2d5184d9c6e77356e4a0e16e487c5d" exitCode=2 Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.425121 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f","Type":"ContainerDied","Data":"45d05df335bdaf2ee6d1081eceeb0fc66d2d5184d9c6e77356e4a0e16e487c5d"} Jan 06 08:36:58 crc kubenswrapper[4784]: I0106 08:36:58.738342 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.087600 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.192578 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlcs5\" (UniqueName: \"kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5\") pod \"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f\" (UID: \"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f\") " Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.201125 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5" (OuterVolumeSpecName: "kube-api-access-tlcs5") pod "eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" (UID: "eaa292ea-ca6c-44b0-9dc5-b4436c657c3f"). InnerVolumeSpecName "kube-api-access-tlcs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.296785 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlcs5\" (UniqueName: \"kubernetes.io/projected/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f-kube-api-access-tlcs5\") on node \"crc\" DevicePath \"\"" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.440085 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.440072 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"eaa292ea-ca6c-44b0-9dc5-b4436c657c3f","Type":"ContainerDied","Data":"21942dbe0e457cd19ecf3fa0e86ded5141c639e448dd08176cf8353f00376d0c"} Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.440356 4784 scope.go:117] "RemoveContainer" containerID="45d05df335bdaf2ee6d1081eceeb0fc66d2d5184d9c6e77356e4a0e16e487c5d" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.443048 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerStarted","Data":"11173b50dab3312f5e3cbff3da5b4fb48e6b6bbcd567291ef20814520b46c6dc"} Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.444732 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerStarted","Data":"36397954e18a3aa46417e79a7f9a4d37815acb719d566fa138f2ed12fe346d21"} Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.482958 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.48292859 podStartE2EDuration="2.48292859s" podCreationTimestamp="2026-01-06 08:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:36:59.473967271 +0000 UTC m=+1321.520140118" watchObservedRunningTime="2026-01-06 08:36:59.48292859 +0000 UTC m=+1321.529101427" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.502897 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.515078 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.528627 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:36:59 crc kubenswrapper[4784]: E0106 08:36:59.529233 4784 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" containerName="kube-state-metrics" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.529264 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" containerName="kube-state-metrics" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.529572 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" containerName="kube-state-metrics" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.530390 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.533158 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.541137 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.548671 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.602598 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg595\" (UniqueName: \"kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.602739 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.602810 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.602844 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.705444 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.705713 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: 
\"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.705780 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.705882 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg595\" (UniqueName: \"kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.711847 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.712882 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.713081 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.734816 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg595\" (UniqueName: \"kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595\") pod \"kube-state-metrics-0\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " pod="openstack/kube-state-metrics-0" Jan 06 08:36:59 crc kubenswrapper[4784]: I0106 08:36:59.864514 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.250740 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.251878 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-central-agent" containerID="cri-o://df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024" gracePeriod=30 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.251922 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="proxy-httpd" containerID="cri-o://81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55" gracePeriod=30 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.251971 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="sg-core" containerID="cri-o://0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e" gracePeriod=30 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.252140 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-notification-agent" containerID="cri-o://1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0" gracePeriod=30 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.324052 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eaa292ea-ca6c-44b0-9dc5-b4436c657c3f" path="/var/lib/kubelet/pods/eaa292ea-ca6c-44b0-9dc5-b4436c657c3f/volumes" Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.459742 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:37:00 crc kubenswrapper[4784]: W0106 08:37:00.467998 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1733b30_f3c1_414f_9140_f42583e97d31.slice/crio-7c53dd49a7ff8ddf0cbc34e0235ddc869165070d048f89a8bb5c939a53991212 WatchSource:0}: Error finding container 7c53dd49a7ff8ddf0cbc34e0235ddc869165070d048f89a8bb5c939a53991212: Status 404 returned error can't find the container with id 7c53dd49a7ff8ddf0cbc34e0235ddc869165070d048f89a8bb5c939a53991212 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.471111 4784 generic.go:334] "Generic (PLEG): container finished" podID="057af17f-86ff-476f-af9c-6efa26be4a78" containerID="0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e" exitCode=2 Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.471299 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerDied","Data":"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"} Jan 06 08:37:00 crc kubenswrapper[4784]: I0106 08:37:00.501261 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.488282 4784 generic.go:334] "Generic (PLEG): container finished" podID="057af17f-86ff-476f-af9c-6efa26be4a78" containerID="81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55" exitCode=0 Jan 06 08:37:01 crc 
kubenswrapper[4784]: I0106 08:37:01.488675 4784 generic.go:334] "Generic (PLEG): container finished" podID="057af17f-86ff-476f-af9c-6efa26be4a78" containerID="df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024" exitCode=0 Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.488726 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerDied","Data":"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"} Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.488766 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerDied","Data":"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"} Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.492205 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f1733b30-f3c1-414f-9140-f42583e97d31","Type":"ContainerStarted","Data":"6b3f7fd4b97b2de294cdc7d27d5014fd5bf46b02dd68f76b1ad9fb9236ae1bba"} Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.492233 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f1733b30-f3c1-414f-9140-f42583e97d31","Type":"ContainerStarted","Data":"7c53dd49a7ff8ddf0cbc34e0235ddc869165070d048f89a8bb5c939a53991212"} Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.494084 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 06 08:37:01 crc kubenswrapper[4784]: I0106 08:37:01.521926 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.158641309 podStartE2EDuration="2.521899271s" podCreationTimestamp="2026-01-06 08:36:59 +0000 UTC" firstStartedPulling="2026-01-06 08:37:00.500861517 +0000 UTC m=+1322.547034374" lastFinishedPulling="2026-01-06 08:37:00.864119499 +0000 UTC m=+1322.910292336" observedRunningTime="2026-01-06 08:37:01.509532116 +0000 UTC m=+1323.555704983" watchObservedRunningTime="2026-01-06 08:37:01.521899271 +0000 UTC m=+1323.568072108" Jan 06 08:37:02 crc kubenswrapper[4784]: I0106 08:37:02.811288 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.060913 4784 util.go:48] "No ready sandbox for pod can be found. 
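Unlike the earlier pods, kube-state-metrics-0 has real pull timestamps above, so podStartSLOduration and podStartE2EDuration diverge. A hypothetical worked example of the arithmetic, with values copied from that record (the last digits differ slightly because the kubelet subtracts using monotonic-clock readings): E2E is running-observed minus created, and the SLO figure additionally subtracts the image-pull window.

package main

import (
	"fmt"
	"time"
)

func mustParse(layout, s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created := mustParse(layout, "2026-01-06 08:36:59 +0000 UTC")
	firstPull := mustParse(layout, "2026-01-06 08:37:00.500861517 +0000 UTC")
	lastPull := mustParse(layout, "2026-01-06 08:37:00.864119499 +0000 UTC")
	observed := mustParse(layout, "2026-01-06 08:37:01.521899271 +0000 UTC")

	e2e := observed.Sub(created)         // 2.521899271s, the podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // ~2.158641s, the podStartSLOduration
	fmt.Println(e2e, slo)
}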
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078395 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078580 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078637 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r95x5\" (UniqueName: \"kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078662 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078722 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.078980 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.079051 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml\") pod \"057af17f-86ff-476f-af9c-6efa26be4a78\" (UID: \"057af17f-86ff-476f-af9c-6efa26be4a78\") " Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.079268 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.079655 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.079645 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). 
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.146698 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts" (OuterVolumeSpecName: "scripts") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.162905 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5" (OuterVolumeSpecName: "kube-api-access-r95x5") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "kube-api-access-r95x5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.179218 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.187060 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.187444 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.187607 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r95x5\" (UniqueName: \"kubernetes.io/projected/057af17f-86ff-476f-af9c-6efa26be4a78-kube-api-access-r95x5\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.187745 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/057af17f-86ff-476f-af9c-6efa26be4a78-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.255771 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.279665 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data" (OuterVolumeSpecName: "config-data") pod "057af17f-86ff-476f-af9c-6efa26be4a78" (UID: "057af17f-86ff-476f-af9c-6efa26be4a78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.290090 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.290118 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/057af17f-86ff-476f-af9c-6efa26be4a78-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.515832 4784 generic.go:334] "Generic (PLEG): container finished" podID="057af17f-86ff-476f-af9c-6efa26be4a78" containerID="1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0" exitCode=0
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.516019 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerDied","Data":"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"}
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.516755 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"057af17f-86ff-476f-af9c-6efa26be4a78","Type":"ContainerDied","Data":"33122c7976b71953d16c6a62e84489d69d0e82fceac777319e01e873eb88067f"}
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.516804 4784 scope.go:117] "RemoveContainer" containerID="81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.516153 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.545080 4784 scope.go:117] "RemoveContainer" containerID="0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.553864 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.575933 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.586473 4784 scope.go:117] "RemoveContainer" containerID="1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.594565 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.596773 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-notification-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.596939 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-notification-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.597056 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="sg-core"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.597124 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="sg-core"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.597213 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="proxy-httpd"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.597284 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="proxy-httpd"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.598665 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-central-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.598769 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-central-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.599136 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-central-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.599276 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="proxy-httpd"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.599388 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="ceilometer-notification-agent"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.599473 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" containerName="sg-core"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.602323 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.609295 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.612652 4784 scope.go:117] "RemoveContainer" containerID="df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.645867 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.645944 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.646520 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.673044 4784 scope.go:117] "RemoveContainer" containerID="81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.676364 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55\": container with ID starting with 81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55 not found: ID does not exist" containerID="81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.676421 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55"} err="failed to get container status \"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55\": rpc error: code = NotFound desc = could not find container \"81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55\": container with ID starting with 81b20e5fca22566e8b8fc266bdff072228a3d355b9a05d135474683106731e55 not found: ID does not exist"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.676455 4784 scope.go:117] "RemoveContainer" containerID="0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.678824 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e\": container with ID starting with 0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e not found: ID does not exist" containerID="0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.678877 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e"} err="failed to get container status \"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e\": rpc error: code = NotFound desc = could not find container \"0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e\": container with ID starting with 0a808ac1dce6336f7ef9630eb1360c90430154927b47f863c65eb69120f1b98e not found: ID does not exist"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.678910 4784 scope.go:117] "RemoveContainer" containerID="1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.684153 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0\": container with ID starting with 1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0 not found: ID does not exist" containerID="1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.684198 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0"} err="failed to get container status \"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0\": rpc error: code = NotFound desc = could not find container \"1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0\": container with ID starting with 1c0f308eb62410e344ceec6e1c1967af77eb816328fd19172f47e8bdbe4ceeb0 not found: ID does not exist"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.684226 4784 scope.go:117] "RemoveContainer" containerID="df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"
Jan 06 08:37:03 crc kubenswrapper[4784]: E0106 08:37:03.684734 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024\": container with ID starting with df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024 not found: ID does not exist" containerID="df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.684767 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024"} err="failed to get container status \"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024\": rpc error: code = NotFound desc = could not find container \"df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024\": container with ID starting with df6b4ff4ec1813373c879a78abfb7d42271c55b79f5ad94c0957e1d4fa80e024 not found: ID does not exist"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.698805 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.698887 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rws66\" (UniqueName: \"kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699225 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699263 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699393 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699429 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699637 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.699777 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.737839 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.775405 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.801627 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802488 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rws66\" (UniqueName: \"kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802633 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802655 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802741 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802761 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802800 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.802835 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.804712 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.806849 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.807928 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.808865 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.809598 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.810434 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.812805 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:03 crc kubenswrapper[4784]: I0106 08:37:03.828275 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rws66\" (UniqueName: \"kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66\") pod \"ceilometer-0\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " pod="openstack/ceilometer-0"
Jan 06 08:37:04 crc kubenswrapper[4784]: I0106 08:37:04.011072 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:37:04 crc kubenswrapper[4784]: I0106 08:37:04.325210 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="057af17f-86ff-476f-af9c-6efa26be4a78" path="/var/lib/kubelet/pods/057af17f-86ff-476f-af9c-6efa26be4a78/volumes"
Jan 06 08:37:04 crc kubenswrapper[4784]: I0106 08:37:04.513965 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:37:04 crc kubenswrapper[4784]: W0106 08:37:04.519852 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60938a41_67bb_45e6_a772_7cff69507075.slice/crio-f57799f8b069aa81d298ee714c7defbc0b28a2c1b5a31aec6580bbafae755882 WatchSource:0}: Error finding container f57799f8b069aa81d298ee714c7defbc0b28a2c1b5a31aec6580bbafae755882: Status 404 returned error can't find the container with id f57799f8b069aa81d298ee714c7defbc0b28a2c1b5a31aec6580bbafae755882
Jan 06 08:37:04 crc kubenswrapper[4784]: I0106 08:37:04.564593 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 06 08:37:05 crc kubenswrapper[4784]: I0106 08:37:05.581792 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerStarted","Data":"682b10882c813ead43ddd358e909b8b59a88e2d111e716dbda41ad59cb6c2121"}
Jan 06 08:37:05 crc kubenswrapper[4784]: I0106 08:37:05.582421 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerStarted","Data":"f57799f8b069aa81d298ee714c7defbc0b28a2c1b5a31aec6580bbafae755882"}
Jan 06 08:37:06 crc kubenswrapper[4784]: I0106 08:37:06.594025 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerStarted","Data":"98c325bce6b04aba8ef722da43396d1618c2f18c4aae21b76be721b2fbff6f8d"}
Jan 06 08:37:07 crc kubenswrapper[4784]: I0106 08:37:07.620105 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerStarted","Data":"d5eab53b0bfd670e63c15e628bea8d1a56527e87be1ffb98f8d20d36b8806f44"}
Jan 06 08:37:07 crc kubenswrapper[4784]: I0106 08:37:07.821730 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 06 08:37:07 crc kubenswrapper[4784]: I0106 08:37:07.821802 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 06 08:37:08 crc kubenswrapper[4784]: I0106 08:37:08.631786 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerStarted","Data":"4c7f264d6e835d5bcfd927a67aeab262e2395c4cc89a67f742f800c7f83bd73c"}
Jan 06 08:37:08 crc kubenswrapper[4784]: I0106 08:37:08.632900 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 06 08:37:08 crc kubenswrapper[4784]: I0106 08:37:08.919735 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 06 08:37:08 crc kubenswrapper[4784]: I0106 08:37:08.920204 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 06 08:37:09 crc kubenswrapper[4784]: I0106 08:37:09.882084 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Jan 06 08:37:09 crc kubenswrapper[4784]: I0106 08:37:09.905925 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.425204663 podStartE2EDuration="6.905901709s" podCreationTimestamp="2026-01-06 08:37:03 +0000 UTC" firstStartedPulling="2026-01-06 08:37:04.52392766 +0000 UTC m=+1326.570100497" lastFinishedPulling="2026-01-06 08:37:08.004624706 +0000 UTC m=+1330.050797543" observedRunningTime="2026-01-06 08:37:08.683023 +0000 UTC m=+1330.729195837" watchObservedRunningTime="2026-01-06 08:37:09.905901709 +0000 UTC m=+1331.952074556"
Jan 06 08:37:11 crc kubenswrapper[4784]: I0106 08:37:11.080459 4784 scope.go:117] "RemoveContainer" containerID="7bfb4e04e5636761f9384f225058f3733a99395c19b81d45d462db0e0da55072"
Jan 06 08:37:11 crc kubenswrapper[4784]: I0106 08:37:11.123864 4784 scope.go:117] "RemoveContainer" containerID="af6c59ff88de5778be22030ba89d277867d5087fe8ed09edfc7a18e5aa079548"
Jan 06 08:37:11 crc kubenswrapper[4784]: I0106 08:37:11.179765 4784 scope.go:117] "RemoveContainer" containerID="1641bb16d77e9ff2f93b4e88b9310b248fb6197270b27cd23c689b65a871ebca"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.668644 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.672667 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t7vv\" (UniqueName: \"kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv\") pod \"7f1895a1-2969-494e-b2eb-8f13b0816697\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.672714 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data\") pod \"7f1895a1-2969-494e-b2eb-8f13b0816697\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.672795 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle\") pod \"7f1895a1-2969-494e-b2eb-8f13b0816697\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.672943 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs\") pod \"7f1895a1-2969-494e-b2eb-8f13b0816697\" (UID: \"7f1895a1-2969-494e-b2eb-8f13b0816697\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.674275 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs" (OuterVolumeSpecName: "logs") pod "7f1895a1-2969-494e-b2eb-8f13b0816697" (UID: "7f1895a1-2969-494e-b2eb-8f13b0816697"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.683118 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv" (OuterVolumeSpecName: "kube-api-access-4t7vv") pod "7f1895a1-2969-494e-b2eb-8f13b0816697" (UID: "7f1895a1-2969-494e-b2eb-8f13b0816697"). InnerVolumeSpecName "kube-api-access-4t7vv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.683316 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.728161 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data" (OuterVolumeSpecName: "config-data") pod "7f1895a1-2969-494e-b2eb-8f13b0816697" (UID: "7f1895a1-2969-494e-b2eb-8f13b0816697"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.729952 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f1895a1-2969-494e-b2eb-8f13b0816697" (UID: "7f1895a1-2969-494e-b2eb-8f13b0816697"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.740299 4784 generic.go:334] "Generic (PLEG): container finished" podID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerID="608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8" exitCode=137
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.740415 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerDied","Data":"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"}
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.740465 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7f1895a1-2969-494e-b2eb-8f13b0816697","Type":"ContainerDied","Data":"f7a8af4261e72c14122437e284dfa9efe90bd8efa3270ecde2324551f80d6c14"}
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.740494 4784 scope.go:117] "RemoveContainer" containerID="608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.740714 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.749288 4784 generic.go:334] "Generic (PLEG): container finished" podID="470ef208-e9ff-49ee-ae66-212a38542ab8" containerID="ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3" exitCode=137
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.749341 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"470ef208-e9ff-49ee-ae66-212a38542ab8","Type":"ContainerDied","Data":"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"}
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.749376 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"470ef208-e9ff-49ee-ae66-212a38542ab8","Type":"ContainerDied","Data":"f69a8ac89a58115986b0639a88c1deaf1080ebfde40372ca7ff446f57760ff62"}
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.749443 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.775134 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.775172 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7f1895a1-2969-494e-b2eb-8f13b0816697-logs\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.775185 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t7vv\" (UniqueName: \"kubernetes.io/projected/7f1895a1-2969-494e-b2eb-8f13b0816697-kube-api-access-4t7vv\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.775199 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f1895a1-2969-494e-b2eb-8f13b0816697-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.817098 4784 scope.go:117] "RemoveContainer" containerID="665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.830186 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.864633 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.884880 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data\") pod \"470ef208-e9ff-49ee-ae66-212a38542ab8\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.885039 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle\") pod \"470ef208-e9ff-49ee-ae66-212a38542ab8\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.885347 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzg2f\" (UniqueName: \"kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f\") pod \"470ef208-e9ff-49ee-ae66-212a38542ab8\" (UID: \"470ef208-e9ff-49ee-ae66-212a38542ab8\") "
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.887708 4784 scope.go:117] "RemoveContainer" containerID="608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.889071 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8\": container with ID starting with 608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8 not found: ID does not exist" containerID="608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.889145 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8"} err="failed to get container status \"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8\": rpc error: code = NotFound desc = could not find container \"608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8\": container with ID starting with 608871eb0eaf96d60136bab2a358ab97e7ed119acce127880111f057fabfa2e8 not found: ID does not exist"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.889211 4784 scope.go:117] "RemoveContainer" containerID="665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.890155 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9\": container with ID starting with 665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9 not found: ID does not exist" containerID="665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.890246 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9"} err="failed to get container status \"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9\": rpc error: code = NotFound desc = could not find container \"665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9\": container with ID starting with 665fc8185924982519245618aac648ee2f104e518ce4efc0636fe9f48626a3b9 not found: ID does not exist"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.890311 4784 scope.go:117] "RemoveContainer" containerID="ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.892336 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f" (OuterVolumeSpecName: "kube-api-access-tzg2f") pod "470ef208-e9ff-49ee-ae66-212a38542ab8" (UID: "470ef208-e9ff-49ee-ae66-212a38542ab8"). InnerVolumeSpecName "kube-api-access-tzg2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.901691 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.902301 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-metadata"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902330 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-metadata"
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.902348 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="470ef208-e9ff-49ee-ae66-212a38542ab8" containerName="nova-cell1-novncproxy-novncproxy"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902357 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="470ef208-e9ff-49ee-ae66-212a38542ab8" containerName="nova-cell1-novncproxy-novncproxy"
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.902371 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-log"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902378 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-log"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902630 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-log"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902657 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" containerName="nova-metadata-metadata"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.902676 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="470ef208-e9ff-49ee-ae66-212a38542ab8" containerName="nova-cell1-novncproxy-novncproxy"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.904295 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.908017 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.908301 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.918421 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.919393 4784 scope.go:117] "RemoveContainer" containerID="ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"
Jan 06 08:37:15 crc kubenswrapper[4784]: E0106 08:37:15.920052 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3\": container with ID starting with ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3 not found: ID does not exist" containerID="ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.920096 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3"} err="failed to get container status \"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3\": rpc error: code = NotFound desc = could not find container \"ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3\": container with ID starting with ac978d98243fe0f46db22584b12130d78fc5bb3e5c3f2cfb67c1c3e6a80249f3 not found: ID does not exist"
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.926851 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data" (OuterVolumeSpecName: "config-data") pod "470ef208-e9ff-49ee-ae66-212a38542ab8" (UID: "470ef208-e9ff-49ee-ae66-212a38542ab8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.931902 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "470ef208-e9ff-49ee-ae66-212a38542ab8" (UID: "470ef208-e9ff-49ee-ae66-212a38542ab8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.991618 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.992158 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/470ef208-e9ff-49ee-ae66-212a38542ab8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:15 crc kubenswrapper[4784]: I0106 08:37:15.992177 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzg2f\" (UniqueName: \"kubernetes.io/projected/470ef208-e9ff-49ee-ae66-212a38542ab8-kube-api-access-tzg2f\") on node \"crc\" DevicePath \"\""
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.091114 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.093982 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.094093 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.094123 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.094155 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.094194 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5rbk\" (UniqueName: \"kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.098935 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.135008 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.136763 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.139702 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.142696 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.142828 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.158886 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197013 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197085 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197136 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197177 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197205 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197244 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197269 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197322 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5rbk\" (UniqueName: \"kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197420 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.197502 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.198379 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.204101 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.205467 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.206269 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.215962 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5rbk\" (UniqueName: \"kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk\") pod \"nova-metadata-0\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.227123 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.299279 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.299372 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.299407 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.299442 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.299471 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.306580 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.308616 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.313177 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.319708 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.324708 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0"
4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") pod \"nova-cell1-novncproxy-0\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.328987 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="470ef208-e9ff-49ee-ae66-212a38542ab8" path="/var/lib/kubelet/pods/470ef208-e9ff-49ee-ae66-212a38542ab8/volumes" Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.329654 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f1895a1-2969-494e-b2eb-8f13b0816697" path="/var/lib/kubelet/pods/7f1895a1-2969-494e-b2eb-8f13b0816697/volumes" Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.453368 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:16 crc kubenswrapper[4784]: W0106 08:37:16.716644 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6a2326e_d6a9_4fc4_b271_ea0a8e1bf579.slice/crio-d3df8a4a5315843186276eb898731510783e7a3b4482004d3d7e943a3ac4b51e WatchSource:0}: Error finding container d3df8a4a5315843186276eb898731510783e7a3b4482004d3d7e943a3ac4b51e: Status 404 returned error can't find the container with id d3df8a4a5315843186276eb898731510783e7a3b4482004d3d7e943a3ac4b51e Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.721582 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.763169 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerStarted","Data":"d3df8a4a5315843186276eb898731510783e7a3b4482004d3d7e943a3ac4b51e"} Jan 06 08:37:16 crc kubenswrapper[4784]: I0106 08:37:16.942584 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.786787 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2585ada6-db24-4639-9f3b-d52919149935","Type":"ContainerStarted","Data":"fea8e4389874b9bc56d905c5919e4d5d64c1dda16388208e5bca742ee7cd64e7"} Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.787327 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2585ada6-db24-4639-9f3b-d52919149935","Type":"ContainerStarted","Data":"3a704d27ae2f01d7db2bb0f948fd9bf68d3f5ec6d06280fb5e4ed4e0bb1d5730"} Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.795476 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerStarted","Data":"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac"} Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.795573 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerStarted","Data":"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1"} Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.825102 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" 
podStartSLOduration=1.825083964 podStartE2EDuration="1.825083964s" podCreationTimestamp="2026-01-06 08:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:17.821274296 +0000 UTC m=+1339.867447143" watchObservedRunningTime="2026-01-06 08:37:17.825083964 +0000 UTC m=+1339.871256791" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.841889 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.842034 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.842703 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.842785 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.850721 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.850692731 podStartE2EDuration="2.850692731s" podCreationTimestamp="2026-01-06 08:37:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:17.847324756 +0000 UTC m=+1339.893497633" watchObservedRunningTime="2026-01-06 08:37:17.850692731 +0000 UTC m=+1339.896865608" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.865673 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 06 08:37:17 crc kubenswrapper[4784]: I0106 08:37:17.866351 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.125587 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"] Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.127449 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.137698 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"] Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.247988 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.248059 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.248087 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfc58\" (UniqueName: \"kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.248137 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.248163 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.248232 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.352643 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.352996 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.353133 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.353164 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfc58\" (UniqueName: \"kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.353342 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.353401 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.354978 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.355059 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.356227 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.360113 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.361862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.397348 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfc58\" (UniqueName: 
\"kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58\") pod \"dnsmasq-dns-867cd545c7-xt7gs\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.457067 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:18 crc kubenswrapper[4784]: I0106 08:37:18.996098 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"] Jan 06 08:37:19 crc kubenswrapper[4784]: I0106 08:37:19.823988 4784 generic.go:334] "Generic (PLEG): container finished" podID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerID="4dcefed887e3875bfae2ad8c88e28b036487507c0804663771f7066b0d7cddac" exitCode=0 Jan 06 08:37:19 crc kubenswrapper[4784]: I0106 08:37:19.824327 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" event={"ID":"8e67aeba-582a-470f-a40f-e1def33f01d2","Type":"ContainerDied","Data":"4dcefed887e3875bfae2ad8c88e28b036487507c0804663771f7066b0d7cddac"} Jan 06 08:37:19 crc kubenswrapper[4784]: I0106 08:37:19.824389 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" event={"ID":"8e67aeba-582a-470f-a40f-e1def33f01d2","Type":"ContainerStarted","Data":"a477e09c75e922220ce7f9da468f75feb10fd91005ee8a0aa7b8159d7e9c6933"} Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.185277 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.185756 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-central-agent" containerID="cri-o://682b10882c813ead43ddd358e909b8b59a88e2d111e716dbda41ad59cb6c2121" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.185889 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-notification-agent" containerID="cri-o://98c325bce6b04aba8ef722da43396d1618c2f18c4aae21b76be721b2fbff6f8d" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.185884 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="sg-core" containerID="cri-o://d5eab53b0bfd670e63c15e628bea8d1a56527e87be1ffb98f8d20d36b8806f44" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.186100 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="proxy-httpd" containerID="cri-o://4c7f264d6e835d5bcfd927a67aeab262e2395c4cc89a67f742f800c7f83bd73c" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.193177 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.195:3000/\": EOF" Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.653822 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.839958 4784 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" event={"ID":"8e67aeba-582a-470f-a40f-e1def33f01d2","Type":"ContainerStarted","Data":"26a913b7bf6de54902c4672761268a5ffa207b1d5eb9d15f8383c8ef9d0df22f"} Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.840103 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843784 4784 generic.go:334] "Generic (PLEG): container finished" podID="60938a41-67bb-45e6-a772-7cff69507075" containerID="4c7f264d6e835d5bcfd927a67aeab262e2395c4cc89a67f742f800c7f83bd73c" exitCode=0 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843821 4784 generic.go:334] "Generic (PLEG): container finished" podID="60938a41-67bb-45e6-a772-7cff69507075" containerID="d5eab53b0bfd670e63c15e628bea8d1a56527e87be1ffb98f8d20d36b8806f44" exitCode=2 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843833 4784 generic.go:334] "Generic (PLEG): container finished" podID="60938a41-67bb-45e6-a772-7cff69507075" containerID="682b10882c813ead43ddd358e909b8b59a88e2d111e716dbda41ad59cb6c2121" exitCode=0 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843848 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerDied","Data":"4c7f264d6e835d5bcfd927a67aeab262e2395c4cc89a67f742f800c7f83bd73c"} Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843936 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerDied","Data":"d5eab53b0bfd670e63c15e628bea8d1a56527e87be1ffb98f8d20d36b8806f44"} Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.843970 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerDied","Data":"682b10882c813ead43ddd358e909b8b59a88e2d111e716dbda41ad59cb6c2121"} Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.844025 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-log" containerID="cri-o://36397954e18a3aa46417e79a7f9a4d37815acb719d566fa138f2ed12fe346d21" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.844097 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-api" containerID="cri-o://11173b50dab3312f5e3cbff3da5b4fb48e6b6bbcd567291ef20814520b46c6dc" gracePeriod=30 Jan 06 08:37:20 crc kubenswrapper[4784]: I0106 08:37:20.878163 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" podStartSLOduration=2.878137733 podStartE2EDuration="2.878137733s" podCreationTimestamp="2026-01-06 08:37:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:20.872953892 +0000 UTC m=+1342.919126769" watchObservedRunningTime="2026-01-06 08:37:20.878137733 +0000 UTC m=+1342.924310570" Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.228069 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.228152 4784 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.453957 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.862295 4784 generic.go:334] "Generic (PLEG): container finished" podID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerID="36397954e18a3aa46417e79a7f9a4d37815acb719d566fa138f2ed12fe346d21" exitCode=143 Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.862407 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerDied","Data":"36397954e18a3aa46417e79a7f9a4d37815acb719d566fa138f2ed12fe346d21"} Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.869480 4784 generic.go:334] "Generic (PLEG): container finished" podID="60938a41-67bb-45e6-a772-7cff69507075" containerID="98c325bce6b04aba8ef722da43396d1618c2f18c4aae21b76be721b2fbff6f8d" exitCode=0 Jan 06 08:37:21 crc kubenswrapper[4784]: I0106 08:37:21.869586 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerDied","Data":"98c325bce6b04aba8ef722da43396d1618c2f18c4aae21b76be721b2fbff6f8d"} Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.266572 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.374931 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375115 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375169 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375202 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375253 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rws66\" (UniqueName: \"kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375344 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd\") pod 
\"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375381 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375466 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.375954 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.376965 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data\") pod \"60938a41-67bb-45e6-a772-7cff69507075\" (UID: \"60938a41-67bb-45e6-a772-7cff69507075\") " Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.377607 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.377630 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60938a41-67bb-45e6-a772-7cff69507075-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.383053 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts" (OuterVolumeSpecName: "scripts") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.396807 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66" (OuterVolumeSpecName: "kube-api-access-rws66") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "kube-api-access-rws66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.443526 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.457269 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.482518 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.482583 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.482594 4784 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.482608 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rws66\" (UniqueName: \"kubernetes.io/projected/60938a41-67bb-45e6-a772-7cff69507075-kube-api-access-rws66\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.486021 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.505492 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data" (OuterVolumeSpecName: "config-data") pod "60938a41-67bb-45e6-a772-7cff69507075" (UID: "60938a41-67bb-45e6-a772-7cff69507075"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.585016 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.585061 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60938a41-67bb-45e6-a772-7cff69507075-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.888041 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60938a41-67bb-45e6-a772-7cff69507075","Type":"ContainerDied","Data":"f57799f8b069aa81d298ee714c7defbc0b28a2c1b5a31aec6580bbafae755882"} Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.888853 4784 scope.go:117] "RemoveContainer" containerID="4c7f264d6e835d5bcfd927a67aeab262e2395c4cc89a67f742f800c7f83bd73c" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.888521 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.936538 4784 scope.go:117] "RemoveContainer" containerID="d5eab53b0bfd670e63c15e628bea8d1a56527e87be1ffb98f8d20d36b8806f44" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.943765 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.963846 4784 scope.go:117] "RemoveContainer" containerID="98c325bce6b04aba8ef722da43396d1618c2f18c4aae21b76be721b2fbff6f8d" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.973481 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.982187 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:22 crc kubenswrapper[4784]: E0106 08:37:22.983149 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-central-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983189 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-central-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: E0106 08:37:22.983223 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="sg-core" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983237 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="sg-core" Jan 06 08:37:22 crc kubenswrapper[4784]: E0106 08:37:22.983272 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="proxy-httpd" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983287 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="proxy-httpd" Jan 06 08:37:22 crc kubenswrapper[4784]: E0106 08:37:22.983331 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-notification-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983346 4784 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-notification-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983745 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="proxy-httpd" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983811 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-notification-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983848 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="ceilometer-central-agent" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.983873 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="60938a41-67bb-45e6-a772-7cff69507075" containerName="sg-core" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.987571 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.991404 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.991721 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.991822 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 06 08:37:22 crc kubenswrapper[4784]: I0106 08:37:22.991872 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.012513 4784 scope.go:117] "RemoveContainer" containerID="682b10882c813ead43ddd358e909b8b59a88e2d111e716dbda41ad59cb6c2121" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.098593 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.098731 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.098774 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.098882 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.098968 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.099050 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.099101 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5dt7\" (UniqueName: \"kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.099156 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.201685 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.201869 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.201997 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.202048 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5dt7\" (UniqueName: \"kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.202174 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.202255 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 
08:37:23.202341 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.202384 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.203261 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.203740 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.213678 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.214123 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.215636 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.215629 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.217677 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.226693 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5dt7\" (UniqueName: \"kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7\") pod \"ceilometer-0\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") " pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.316629 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.834268 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 06 08:37:23 crc kubenswrapper[4784]: I0106 08:37:23.903815 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerStarted","Data":"35660d6fa6a070566dffa45ab763671bba14b1bead0045555e854f3245462de7"} Jan 06 08:37:24 crc kubenswrapper[4784]: I0106 08:37:24.325399 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60938a41-67bb-45e6-a772-7cff69507075" path="/var/lib/kubelet/pods/60938a41-67bb-45e6-a772-7cff69507075/volumes" Jan 06 08:37:24 crc kubenswrapper[4784]: I0106 08:37:24.916879 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerStarted","Data":"4ef361b972f42bea1be49ec4ca483a7dbcd0419667a1513a9db6c46533072348"} Jan 06 08:37:24 crc kubenswrapper[4784]: I0106 08:37:24.920209 4784 generic.go:334] "Generic (PLEG): container finished" podID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerID="11173b50dab3312f5e3cbff3da5b4fb48e6b6bbcd567291ef20814520b46c6dc" exitCode=0 Jan 06 08:37:24 crc kubenswrapper[4784]: I0106 08:37:24.920272 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerDied","Data":"11173b50dab3312f5e3cbff3da5b4fb48e6b6bbcd567291ef20814520b46c6dc"} Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.252396 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.349816 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jq7d\" (UniqueName: \"kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d\") pod \"be92cba4-83b4-4f7f-b645-972cbc86c55a\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.349979 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data\") pod \"be92cba4-83b4-4f7f-b645-972cbc86c55a\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.350191 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle\") pod \"be92cba4-83b4-4f7f-b645-972cbc86c55a\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.350238 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs\") pod \"be92cba4-83b4-4f7f-b645-972cbc86c55a\" (UID: \"be92cba4-83b4-4f7f-b645-972cbc86c55a\") " Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.357195 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs" (OuterVolumeSpecName: "logs") pod "be92cba4-83b4-4f7f-b645-972cbc86c55a" (UID: "be92cba4-83b4-4f7f-b645-972cbc86c55a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.357939 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d" (OuterVolumeSpecName: "kube-api-access-9jq7d") pod "be92cba4-83b4-4f7f-b645-972cbc86c55a" (UID: "be92cba4-83b4-4f7f-b645-972cbc86c55a"). InnerVolumeSpecName "kube-api-access-9jq7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.389894 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data" (OuterVolumeSpecName: "config-data") pod "be92cba4-83b4-4f7f-b645-972cbc86c55a" (UID: "be92cba4-83b4-4f7f-b645-972cbc86c55a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.400848 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be92cba4-83b4-4f7f-b645-972cbc86c55a" (UID: "be92cba4-83b4-4f7f-b645-972cbc86c55a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.453715 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.453752 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be92cba4-83b4-4f7f-b645-972cbc86c55a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.453765 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be92cba4-83b4-4f7f-b645-972cbc86c55a-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.453774 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jq7d\" (UniqueName: \"kubernetes.io/projected/be92cba4-83b4-4f7f-b645-972cbc86c55a-kube-api-access-9jq7d\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.934865 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerStarted","Data":"dc6835d04241d55d04ae4a35e417f32bdfac1a264f3e1266dbf5a6c0cea46b5d"} Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.945723 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"be92cba4-83b4-4f7f-b645-972cbc86c55a","Type":"ContainerDied","Data":"a139f3ad1e7de2711bdd12ce7d054744232ee6dbacb833fa5917bc0997e3cf02"} Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.946083 4784 scope.go:117] "RemoveContainer" containerID="11173b50dab3312f5e3cbff3da5b4fb48e6b6bbcd567291ef20814520b46c6dc" Jan 06 08:37:25 crc kubenswrapper[4784]: I0106 08:37:25.945911 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.049080 4784 scope.go:117] "RemoveContainer" containerID="36397954e18a3aa46417e79a7f9a4d37815acb719d566fa138f2ed12fe346d21" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.058994 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.073936 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.085194 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:26 crc kubenswrapper[4784]: E0106 08:37:26.085782 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-log" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.085811 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-log" Jan 06 08:37:26 crc kubenswrapper[4784]: E0106 08:37:26.085848 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-api" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.085855 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-api" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.086060 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-api" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.086081 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" containerName="nova-api-log" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.087203 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.093012 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.093702 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.093949 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.104950 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173191 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173298 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qhbt\" (UniqueName: \"kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173340 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173359 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173394 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.173443 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.229384 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.229428 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275137 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qhbt\" (UniqueName: \"kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt\") pod \"nova-api-0\" 
(UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275213 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275246 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275287 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275350 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.275421 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.276158 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.281816 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.283057 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.284004 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.285849 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc 
kubenswrapper[4784]: I0106 08:37:26.294747 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qhbt\" (UniqueName: \"kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt\") pod \"nova-api-0\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.325667 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be92cba4-83b4-4f7f-b645-972cbc86c55a" path="/var/lib/kubelet/pods/be92cba4-83b4-4f7f-b645-972cbc86c55a/volumes" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.454240 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.457858 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.485910 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.970357 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerStarted","Data":"7f0e5dcde65f104bcb37b1f44544e265a3b934ad96e3df20a22d72ff4674f3a0"} Jan 06 08:37:26 crc kubenswrapper[4784]: I0106 08:37:26.997406 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.014305 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.279279 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.279517 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.295923 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-79rk8"] Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.297974 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.302154 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.302441 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.332844 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-79rk8"] Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.421304 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tgtq\" (UniqueName: \"kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.421957 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.422066 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.422094 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.523938 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tgtq\" (UniqueName: \"kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.524765 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.524875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.524904 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.531128 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.531418 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.537437 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.560763 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tgtq\" (UniqueName: \"kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq\") pod \"nova-cell1-cell-mapping-79rk8\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:27 crc kubenswrapper[4784]: I0106 08:37:27.673737 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.012482 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerStarted","Data":"720bfe1553db1a3c166e22d70a377ca997d8451c2dc8326f20d8aeb1c6b818b6"} Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.015230 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.030305 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerStarted","Data":"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70"} Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.030363 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerStarted","Data":"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f"} Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.030376 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerStarted","Data":"c30cda714318aa0fe39b2c1128e9497d80c1e3c4e8795f292f3079927b733fbc"} Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.086931 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.281115937 podStartE2EDuration="6.086905785s" podCreationTimestamp="2026-01-06 08:37:22 +0000 UTC" firstStartedPulling="2026-01-06 08:37:23.838009879 +0000 UTC m=+1345.884182726" lastFinishedPulling="2026-01-06 08:37:27.643799737 +0000 UTC m=+1349.689972574" observedRunningTime="2026-01-06 08:37:28.041610305 +0000 UTC m=+1350.087783152" watchObservedRunningTime="2026-01-06 08:37:28.086905785 +0000 UTC m=+1350.133078622" Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.240412 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.240371824 podStartE2EDuration="2.240371824s" podCreationTimestamp="2026-01-06 08:37:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:28.077404509 +0000 UTC m=+1350.123577356" watchObservedRunningTime="2026-01-06 08:37:28.240371824 +0000 UTC m=+1350.286544701" Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.263121 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-79rk8"] Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.459216 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.551163 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:37:28 crc kubenswrapper[4784]: I0106 08:37:28.552004 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="dnsmasq-dns" containerID="cri-o://ef4ed89dc708181ffc925d7e028fb5062c84cfa88d18f20a58a06f6a86020e18" gracePeriod=10 Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.069083 4784 generic.go:334] "Generic (PLEG): 
container finished" podID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerID="ef4ed89dc708181ffc925d7e028fb5062c84cfa88d18f20a58a06f6a86020e18" exitCode=0 Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.069376 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" event={"ID":"97759d02-ee09-4c0a-bc00-a6a940f45fc5","Type":"ContainerDied","Data":"ef4ed89dc708181ffc925d7e028fb5062c84cfa88d18f20a58a06f6a86020e18"} Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.069757 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" event={"ID":"97759d02-ee09-4c0a-bc00-a6a940f45fc5","Type":"ContainerDied","Data":"10fb19ffae5e16f590263e2d00076e71dc8b1aa0afc995751b37ba9c0a521ad2"} Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.069779 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10fb19ffae5e16f590263e2d00076e71dc8b1aa0afc995751b37ba9c0a521ad2" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.076716 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-79rk8" event={"ID":"8bb037f9-c2c5-4d71-af96-2c1ce93f720a","Type":"ContainerStarted","Data":"3323b77f708d877c321b69ac39bf8466a315e9fe763a648674fb29ca28122eac"} Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.076752 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-79rk8" event={"ID":"8bb037f9-c2c5-4d71-af96-2c1ce93f720a","Type":"ContainerStarted","Data":"b1fc650310bc69951d7842f3314150aefa627b0366bb09ef4c201ab6a4b5bbbf"} Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.109921 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-79rk8" podStartSLOduration=2.109894539 podStartE2EDuration="2.109894539s" podCreationTimestamp="2026-01-06 08:37:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:29.097565366 +0000 UTC m=+1351.143738203" watchObservedRunningTime="2026-01-06 08:37:29.109894539 +0000 UTC m=+1351.156067376" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.120905 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287420 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287577 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlxx9\" (UniqueName: \"kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287734 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287784 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287875 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.287935 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config\") pod \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\" (UID: \"97759d02-ee09-4c0a-bc00-a6a940f45fc5\") " Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.296354 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9" (OuterVolumeSpecName: "kube-api-access-mlxx9") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "kube-api-access-mlxx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.346261 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.359021 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.369859 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config" (OuterVolumeSpecName: "config") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.371288 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.372494 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "97759d02-ee09-4c0a-bc00-a6a940f45fc5" (UID: "97759d02-ee09-4c0a-bc00-a6a940f45fc5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391665 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391731 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391748 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlxx9\" (UniqueName: \"kubernetes.io/projected/97759d02-ee09-4c0a-bc00-a6a940f45fc5-kube-api-access-mlxx9\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391783 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391796 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:29 crc kubenswrapper[4784]: I0106 08:37:29.391808 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/97759d02-ee09-4c0a-bc00-a6a940f45fc5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:30 crc kubenswrapper[4784]: I0106 08:37:30.084840 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-k245g" Jan 06 08:37:30 crc kubenswrapper[4784]: I0106 08:37:30.134214 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:37:30 crc kubenswrapper[4784]: I0106 08:37:30.146593 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-k245g"] Jan 06 08:37:30 crc kubenswrapper[4784]: I0106 08:37:30.329282 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" path="/var/lib/kubelet/pods/97759d02-ee09-4c0a-bc00-a6a940f45fc5/volumes" Jan 06 08:37:34 crc kubenswrapper[4784]: I0106 08:37:34.151164 4784 generic.go:334] "Generic (PLEG): container finished" podID="8bb037f9-c2c5-4d71-af96-2c1ce93f720a" containerID="3323b77f708d877c321b69ac39bf8466a315e9fe763a648674fb29ca28122eac" exitCode=0 Jan 06 08:37:34 crc kubenswrapper[4784]: I0106 08:37:34.151296 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-79rk8" event={"ID":"8bb037f9-c2c5-4d71-af96-2c1ce93f720a","Type":"ContainerDied","Data":"3323b77f708d877c321b69ac39bf8466a315e9fe763a648674fb29ca28122eac"} Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.688426 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.770306 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tgtq\" (UniqueName: \"kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq\") pod \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.770442 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts\") pod \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.770501 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data\") pod \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.770674 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle\") pod \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\" (UID: \"8bb037f9-c2c5-4d71-af96-2c1ce93f720a\") " Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.779353 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq" (OuterVolumeSpecName: "kube-api-access-5tgtq") pod "8bb037f9-c2c5-4d71-af96-2c1ce93f720a" (UID: "8bb037f9-c2c5-4d71-af96-2c1ce93f720a"). InnerVolumeSpecName "kube-api-access-5tgtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.779742 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts" (OuterVolumeSpecName: "scripts") pod "8bb037f9-c2c5-4d71-af96-2c1ce93f720a" (UID: "8bb037f9-c2c5-4d71-af96-2c1ce93f720a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.815636 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data" (OuterVolumeSpecName: "config-data") pod "8bb037f9-c2c5-4d71-af96-2c1ce93f720a" (UID: "8bb037f9-c2c5-4d71-af96-2c1ce93f720a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.818391 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bb037f9-c2c5-4d71-af96-2c1ce93f720a" (UID: "8bb037f9-c2c5-4d71-af96-2c1ce93f720a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.873839 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tgtq\" (UniqueName: \"kubernetes.io/projected/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-kube-api-access-5tgtq\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.873889 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.873902 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:35 crc kubenswrapper[4784]: I0106 08:37:35.873918 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb037f9-c2c5-4d71-af96-2c1ce93f720a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.182894 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-79rk8" event={"ID":"8bb037f9-c2c5-4d71-af96-2c1ce93f720a","Type":"ContainerDied","Data":"b1fc650310bc69951d7842f3314150aefa627b0366bb09ef4c201ab6a4b5bbbf"} Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.182966 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1fc650310bc69951d7842f3314150aefa627b0366bb09ef4c201ab6a4b5bbbf" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.183039 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-79rk8" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.240802 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.245284 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.250747 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.396208 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.396695 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-log" containerID="cri-o://962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" gracePeriod=30 Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.396852 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-api" containerID="cri-o://3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" gracePeriod=30 Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.408242 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.408526 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" containerName="nova-scheduler-scheduler" containerID="cri-o://c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c" gracePeriod=30 Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.466372 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:36 crc kubenswrapper[4784]: I0106 08:37:36.997367 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.125682 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.125836 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qhbt\" (UniqueName: \"kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.125914 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.126126 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.126171 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs" (OuterVolumeSpecName: "logs") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.126387 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.126469 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs\") pod \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\" (UID: \"e5cc5a18-ca65-48a8-a2bf-b2207a749b92\") " Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.130111 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.143135 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt" (OuterVolumeSpecName: "kube-api-access-9qhbt") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "kube-api-access-9qhbt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.171385 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data" (OuterVolumeSpecName: "config-data") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.185395 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.197712 4784 generic.go:334] "Generic (PLEG): container finished" podID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerID="3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" exitCode=0 Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.197759 4784 generic.go:334] "Generic (PLEG): container finished" podID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerID="962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" exitCode=143 Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.198166 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.198999 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerDied","Data":"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70"} Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.199041 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerDied","Data":"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f"} Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.199052 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e5cc5a18-ca65-48a8-a2bf-b2207a749b92","Type":"ContainerDied","Data":"c30cda714318aa0fe39b2c1128e9497d80c1e3c4e8795f292f3079927b733fbc"} Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.199069 4784 scope.go:117] "RemoveContainer" containerID="3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.202711 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.210814 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.211649 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e5cc5a18-ca65-48a8-a2bf-b2207a749b92" (UID: "e5cc5a18-ca65-48a8-a2bf-b2207a749b92"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.235272 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qhbt\" (UniqueName: \"kubernetes.io/projected/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-kube-api-access-9qhbt\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.235305 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.235315 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.235324 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.235332 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5cc5a18-ca65-48a8-a2bf-b2207a749b92-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.302858 4784 scope.go:117] "RemoveContainer" containerID="962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.341881 4784 scope.go:117] "RemoveContainer" containerID="3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.346270 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70\": container with ID starting with 3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70 not found: ID does not exist" containerID="3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.346355 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70"} err="failed to get container status \"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70\": rpc error: code = NotFound desc = could not find container \"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70\": container with ID starting with 3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70 not found: ID does not exist" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.346402 4784 scope.go:117] "RemoveContainer" 
containerID="962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.349322 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f\": container with ID starting with 962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f not found: ID does not exist" containerID="962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.349377 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f"} err="failed to get container status \"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f\": rpc error: code = NotFound desc = could not find container \"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f\": container with ID starting with 962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f not found: ID does not exist" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.349414 4784 scope.go:117] "RemoveContainer" containerID="3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.349854 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70"} err="failed to get container status \"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70\": rpc error: code = NotFound desc = could not find container \"3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70\": container with ID starting with 3a22829a39fe3f25952d28f232bd1cc3833afe09486cf3a05593dcf5e632eb70 not found: ID does not exist" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.349914 4784 scope.go:117] "RemoveContainer" containerID="962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.352187 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f"} err="failed to get container status \"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f\": rpc error: code = NotFound desc = could not find container \"962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f\": container with ID starting with 962cd6aecc31720ea0e9ccca4bf7e4d2c50b288d611f1e8ac988b1a63ef3189f not found: ID does not exist" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.555062 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.578304 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597313 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.597803 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="init" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597821 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="init" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.597855 
4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-log" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597860 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-log" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.597874 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-api" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597880 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-api" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.597890 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="dnsmasq-dns" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597896 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="dnsmasq-dns" Jan 06 08:37:37 crc kubenswrapper[4784]: E0106 08:37:37.597914 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bb037f9-c2c5-4d71-af96-2c1ce93f720a" containerName="nova-manage" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.597919 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bb037f9-c2c5-4d71-af96-2c1ce93f720a" containerName="nova-manage" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.598079 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="97759d02-ee09-4c0a-bc00-a6a940f45fc5" containerName="dnsmasq-dns" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.598094 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bb037f9-c2c5-4d71-af96-2c1ce93f720a" containerName="nova-manage" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.598102 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-log" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.598123 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" containerName="nova-api-api" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.599033 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.599126 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.632891 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.633345 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.634161 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.753969 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.754054 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.754104 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqlk5\" (UniqueName: \"kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.754131 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.754153 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.756187 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.859301 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.859382 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 
08:37:37.859427 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.859461 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqlk5\" (UniqueName: \"kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.859485 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.859504 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.868264 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.872125 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.872485 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.873809 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.892115 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:37 crc kubenswrapper[4784]: I0106 08:37:37.898967 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqlk5\" (UniqueName: \"kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5\") pod \"nova-api-0\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " pod="openstack/nova-api-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.001888 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.109587 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214463 4784 generic.go:334] "Generic (PLEG): container finished" podID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" containerID="c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c" exitCode=0 Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214666 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0e978fb0-450f-4c11-bdb8-6bc6b181d471","Type":"ContainerDied","Data":"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c"} Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214731 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0e978fb0-450f-4c11-bdb8-6bc6b181d471","Type":"ContainerDied","Data":"ad89c70e762f72acc71fcc401f4abbb112b32d6cbcc122b5fac6b1341f1b2de9"} Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214754 4784 scope.go:117] "RemoveContainer" containerID="c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214805 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" containerID="cri-o://cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1" gracePeriod=30 Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214980 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.214959 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" containerID="cri-o://a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac" gracePeriod=30 Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.268779 4784 scope.go:117] "RemoveContainer" containerID="c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.269850 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle\") pod \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.270128 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2kl2\" (UniqueName: \"kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2\") pod \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.270166 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data\") pod \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\" (UID: \"0e978fb0-450f-4c11-bdb8-6bc6b181d471\") " Jan 06 08:37:38 crc kubenswrapper[4784]: E0106 08:37:38.271190 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c\": container with ID starting with c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c not found: ID does not exist" containerID="c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.271223 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c"} err="failed to get container status \"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c\": rpc error: code = NotFound desc = could not find container \"c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c\": container with ID starting with c03d9906b7768ae9a3aa053028fab2ef62650309d7600bec779eb450fa69b23c not found: ID does not exist" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.281963 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2" (OuterVolumeSpecName: "kube-api-access-c2kl2") pod "0e978fb0-450f-4c11-bdb8-6bc6b181d471" (UID: "0e978fb0-450f-4c11-bdb8-6bc6b181d471"). InnerVolumeSpecName "kube-api-access-c2kl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.307448 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e978fb0-450f-4c11-bdb8-6bc6b181d471" (UID: "0e978fb0-450f-4c11-bdb8-6bc6b181d471"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.309487 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data" (OuterVolumeSpecName: "config-data") pod "0e978fb0-450f-4c11-bdb8-6bc6b181d471" (UID: "0e978fb0-450f-4c11-bdb8-6bc6b181d471"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.334464 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5cc5a18-ca65-48a8-a2bf-b2207a749b92" path="/var/lib/kubelet/pods/e5cc5a18-ca65-48a8-a2bf-b2207a749b92/volumes" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.372921 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.372973 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2kl2\" (UniqueName: \"kubernetes.io/projected/0e978fb0-450f-4c11-bdb8-6bc6b181d471-kube-api-access-c2kl2\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.373019 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e978fb0-450f-4c11-bdb8-6bc6b181d471-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.541281 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.557111 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.575554 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.616634 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:38 crc kubenswrapper[4784]: E0106 08:37:38.617258 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" containerName="nova-scheduler-scheduler" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.617273 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" containerName="nova-scheduler-scheduler" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.617503 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" containerName="nova-scheduler-scheduler" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.622785 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.627932 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.632041 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.781878 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.781937 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjlbw\" (UniqueName: \"kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.781961 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.885834 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.886295 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjlbw\" (UniqueName: \"kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.886342 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.893701 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.895838 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:38 crc kubenswrapper[4784]: I0106 08:37:38.913036 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjlbw\" (UniqueName: 
\"kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw\") pod \"nova-scheduler-0\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") " pod="openstack/nova-scheduler-0" Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.069098 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.235237 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerStarted","Data":"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217"} Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.235813 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerStarted","Data":"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da"} Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.235847 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerStarted","Data":"30be62fa6f5e59d59e40f41f49aacc0bb8277eb581cb78bfa26519f607f5bba7"} Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.237519 4784 generic.go:334] "Generic (PLEG): container finished" podID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerID="cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1" exitCode=143 Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.237574 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerDied","Data":"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1"} Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.257967 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.257943929 podStartE2EDuration="2.257943929s" podCreationTimestamp="2026-01-06 08:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:39.254882533 +0000 UTC m=+1361.301055370" watchObservedRunningTime="2026-01-06 08:37:39.257943929 +0000 UTC m=+1361.304116766" Jan 06 08:37:39 crc kubenswrapper[4784]: I0106 08:37:39.599757 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:37:40 crc kubenswrapper[4784]: I0106 08:37:40.258192 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d1e87443-2d75-4063-934c-dc593d03987c","Type":"ContainerStarted","Data":"69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902"} Jan 06 08:37:40 crc kubenswrapper[4784]: I0106 08:37:40.258248 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d1e87443-2d75-4063-934c-dc593d03987c","Type":"ContainerStarted","Data":"2decc0eaea9f303f34954ab20b57f7c7061d9199bd2b71a622ab717b9ac810f0"} Jan 06 08:37:40 crc kubenswrapper[4784]: I0106 08:37:40.291316 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.291230813 podStartE2EDuration="2.291230813s" podCreationTimestamp="2026-01-06 08:37:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 
08:37:40.284832245 +0000 UTC m=+1362.331005102" watchObservedRunningTime="2026-01-06 08:37:40.291230813 +0000 UTC m=+1362.337403700" Jan 06 08:37:40 crc kubenswrapper[4784]: I0106 08:37:40.336853 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e978fb0-450f-4c11-bdb8-6bc6b181d471" path="/var/lib/kubelet/pods/0e978fb0-450f-4c11-bdb8-6bc6b181d471/volumes" Jan 06 08:37:41 crc kubenswrapper[4784]: I0106 08:37:41.364815 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": read tcp 10.217.0.2:40546->10.217.0.196:8775: read: connection reset by peer" Jan 06 08:37:41 crc kubenswrapper[4784]: I0106 08:37:41.364918 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": read tcp 10.217.0.2:40532->10.217.0.196:8775: read: connection reset by peer" Jan 06 08:37:41 crc kubenswrapper[4784]: I0106 08:37:41.926401 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.064945 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs\") pod \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.065123 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs\") pod \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.065275 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5rbk\" (UniqueName: \"kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk\") pod \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.065315 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data\") pod \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.065469 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle\") pod \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\" (UID: \"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579\") " Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.068026 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs" (OuterVolumeSpecName: "logs") pod "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" (UID: "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.075730 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk" (OuterVolumeSpecName: "kube-api-access-g5rbk") pod "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" (UID: "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579"). InnerVolumeSpecName "kube-api-access-g5rbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.103908 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data" (OuterVolumeSpecName: "config-data") pod "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" (UID: "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.143726 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" (UID: "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.148827 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" (UID: "f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.171056 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5rbk\" (UniqueName: \"kubernetes.io/projected/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-kube-api-access-g5rbk\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.171103 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.171115 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.171124 4784 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.171133 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.285981 4784 generic.go:334] "Generic (PLEG): container finished" podID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerID="a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac" exitCode=0 Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.286041 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerDied","Data":"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac"} Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.286082 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579","Type":"ContainerDied","Data":"d3df8a4a5315843186276eb898731510783e7a3b4482004d3d7e943a3ac4b51e"} Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.286110 4784 scope.go:117] "RemoveContainer" containerID="a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.286047 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.331144 4784 scope.go:117] "RemoveContainer" containerID="cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.335301 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.355672 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.367960 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:42 crc kubenswrapper[4784]: E0106 08:37:42.368582 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.368600 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" Jan 06 08:37:42 crc kubenswrapper[4784]: E0106 08:37:42.368637 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.368643 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.368886 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-log" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.368901 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" containerName="nova-metadata-metadata" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.370238 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.372901 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.373257 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.379621 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.409107 4784 scope.go:117] "RemoveContainer" containerID="a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac" Jan 06 08:37:42 crc kubenswrapper[4784]: E0106 08:37:42.409876 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac\": container with ID starting with a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac not found: ID does not exist" containerID="a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.409937 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac"} err="failed to get container status \"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac\": rpc error: code = NotFound desc = could not find container \"a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac\": container with ID starting with a289f9adf484697b5e2e7513e09a34e6a2215f6e2443766fc229cb4cc88bd7ac not found: ID does not exist" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.409987 4784 scope.go:117] "RemoveContainer" containerID="cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1" Jan 06 08:37:42 crc kubenswrapper[4784]: E0106 08:37:42.410419 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1\": container with ID starting with cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1 not found: ID does not exist" containerID="cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.410567 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1"} err="failed to get container status \"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1\": rpc error: code = NotFound desc = could not find container \"cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1\": container with ID starting with cf52ed937a7eb23c4e9f98d98a78f01d34ba3a53d8fa4db35cca24da66e91bc1 not found: ID does not exist" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.476395 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.476465 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.476738 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.477203 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.477838 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4f2v\" (UniqueName: \"kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.579907 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.579982 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.580079 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.580151 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4f2v\" (UniqueName: \"kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.580192 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.580944 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 
08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.585445 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.585664 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.586678 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.600135 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4f2v\" (UniqueName: \"kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v\") pod \"nova-metadata-0\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " pod="openstack/nova-metadata-0" Jan 06 08:37:42 crc kubenswrapper[4784]: I0106 08:37:42.726971 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:37:43 crc kubenswrapper[4784]: W0106 08:37:43.247859 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79cac775_c143_4370_bf3b_b25e2ca62120.slice/crio-ee67f7bb0e1c5ca073830d65cf12e2aa98102c9c26ff9beebb0b46752ce0beac WatchSource:0}: Error finding container ee67f7bb0e1c5ca073830d65cf12e2aa98102c9c26ff9beebb0b46752ce0beac: Status 404 returned error can't find the container with id ee67f7bb0e1c5ca073830d65cf12e2aa98102c9c26ff9beebb0b46752ce0beac Jan 06 08:37:43 crc kubenswrapper[4784]: I0106 08:37:43.250568 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:37:43 crc kubenswrapper[4784]: I0106 08:37:43.302271 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerStarted","Data":"ee67f7bb0e1c5ca073830d65cf12e2aa98102c9c26ff9beebb0b46752ce0beac"} Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.069841 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.345400 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579" path="/var/lib/kubelet/pods/f6a2326e-d6a9-4fc4-b271-ea0a8e1bf579/volumes" Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.346394 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerStarted","Data":"00d276555cdbfb9bcc2cbe375f52631931dd7b8c8efe7fb96c6f9c7ad948b1ab"} Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.346449 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerStarted","Data":"fd90bc399e2a254f94bbf15631cc9bd41d01a3e85ba8564aafc267bb679d9db4"} Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.350720 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.351095 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:37:44 crc kubenswrapper[4784]: I0106 08:37:44.392940 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.392913186 podStartE2EDuration="2.392913186s" podCreationTimestamp="2026-01-06 08:37:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 08:37:44.369610853 +0000 UTC m=+1366.415783700" watchObservedRunningTime="2026-01-06 08:37:44.392913186 +0000 UTC m=+1366.439086043" Jan 06 08:37:47 crc kubenswrapper[4784]: I0106 08:37:47.728159 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:37:47 crc kubenswrapper[4784]: I0106 08:37:47.729101 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 06 08:37:48 crc kubenswrapper[4784]: I0106 08:37:48.003285 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 06 08:37:48 crc kubenswrapper[4784]: I0106 08:37:48.003351 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 06 08:37:49 crc kubenswrapper[4784]: I0106 08:37:49.014884 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:49 crc kubenswrapper[4784]: I0106 08:37:49.015437 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:49 crc kubenswrapper[4784]: I0106 08:37:49.070125 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 06 08:37:49 crc kubenswrapper[4784]: I0106 08:37:49.123989 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 06 08:37:49 crc kubenswrapper[4784]: I0106 08:37:49.445873 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 06 08:37:52 crc kubenswrapper[4784]: I0106 08:37:52.727313 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 06 08:37:52 crc 
kubenswrapper[4784]: I0106 08:37:52.729928 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 06 08:37:53 crc kubenswrapper[4784]: I0106 08:37:53.327708 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 06 08:37:53 crc kubenswrapper[4784]: I0106 08:37:53.747809 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:53 crc kubenswrapper[4784]: I0106 08:37:53.747887 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.013919 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.015373 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.022033 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.026995 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.531558 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 06 08:37:58 crc kubenswrapper[4784]: I0106 08:37:58.539045 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 06 08:38:02 crc kubenswrapper[4784]: I0106 08:38:02.739041 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 06 08:38:02 crc kubenswrapper[4784]: I0106 08:38:02.749399 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 06 08:38:02 crc kubenswrapper[4784]: I0106 08:38:02.762691 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 06 08:38:03 crc kubenswrapper[4784]: I0106 08:38:03.599310 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 06 08:38:14 crc kubenswrapper[4784]: I0106 08:38:14.351577 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:38:14 crc kubenswrapper[4784]: I0106 08:38:14.352268 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:38:24 crc kubenswrapper[4784]: I0106 08:38:24.808137 4784 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 06 08:38:24 crc kubenswrapper[4784]: I0106 08:38:24.809224 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" containerName="openstackclient" containerID="cri-o://b858b3f1e24015ec6ea11e61a8507c959309d301f0e42d8b7abf79701332e44f" gracePeriod=2 Jan 06 08:38:24 crc kubenswrapper[4784]: I0106 08:38:24.847902 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 06 08:38:24 crc kubenswrapper[4784]: I0106 08:38:24.928840 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:38:24 crc kubenswrapper[4784]: I0106 08:38:24.929214 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerName="nova-cell1-conductor-conductor" containerID="cri-o://286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" gracePeriod=30 Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.067738 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"] Jan 06 08:38:25 crc kubenswrapper[4784]: E0106 08:38:25.084591 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" containerName="openstackclient" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.084638 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" containerName="openstackclient" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.085445 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" containerName="openstackclient" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.091902 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.132414 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.152785 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.164725 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.204373 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.212667 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.214223 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.239954 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.258398 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.258757 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="2585ada6-db24-4639-9f3b-d52919149935" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://fea8e4389874b9bc56d905c5919e4d5d64c1dda16388208e5bca742ee7cd64e7" gracePeriod=30 Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.264426 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.264486 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdcgk\" (UniqueName: \"kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.268732 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2mwl\" (UniqueName: \"kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.269285 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.314345 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d356-account-create-update-wh749"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.365668 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371068 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9plhn\" (UniqueName: \"kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371123 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371156 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdcgk\" (UniqueName: \"kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371209 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2mwl\" (UniqueName: \"kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371257 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.371293 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.372047 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.373534 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.424749 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdcgk\" (UniqueName: \"kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk\") pod \"cinder-c78a-account-create-update-zjr59\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.426011 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.435682 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2mwl\" (UniqueName: 
\"kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl\") pod \"barbican-d356-account-create-update-2prdj\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") " pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.450858 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d356-account-create-update-wh749"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.474031 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9plhn\" (UniqueName: \"kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.474177 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.475037 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.476719 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.488368 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c78a-account-create-update-97xxc"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.552629 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.569490 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.611580 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c78a-account-create-update-97xxc"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.624243 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9plhn\" (UniqueName: \"kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn\") pod \"root-account-create-update-sx6jh\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.698626 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.726132 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-fg6t2"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.749851 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-fg6t2"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.780389 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.782084 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-qhcs2" Jan 06 08:38:25 crc kubenswrapper[4784]: E0106 08:38:25.796597 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:25 crc kubenswrapper[4784]: E0106 08:38:25.796692 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data podName:41c89df0-d35f-4f47-86f3-71a2c0971d79 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:26.296659473 +0000 UTC m=+1408.342832310 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data") pod "rabbitmq-cell1-server-0" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79") : configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.800378 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.818920 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.886240 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"] Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.887691 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.895496 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.895652 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mzls\" (UniqueName: \"kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.901694 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.907598 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-sx6jh"
Jan 06 08:38:25 crc kubenswrapper[4784]: I0106 08:38:25.934560 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c594-account-create-update-k9b28"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:25.998144 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mzls\" (UniqueName: \"kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:25.998221 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:25.998252 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mggl2\" (UniqueName: \"kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:25.998346 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:25.999148 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.005417 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-j4gnj"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.055308 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mzls\" (UniqueName: \"kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls\") pod \"placement-c594-account-create-update-qhcs2\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.055403 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-c594-account-create-update-k9b28"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.103356 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.103433 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mggl2\" (UniqueName: \"kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.104582 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.114455 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-vwmz9"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.136784 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-361f-account-create-update-wzgwj"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.189190 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-qhcs2"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.194160 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-j4gnj"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.227822 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mggl2\" (UniqueName: \"kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2\") pod \"nova-api-361f-account-create-update-94fnx\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") " pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.231622 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.261170 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.266886 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-vwmz9"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.307637 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-361f-account-create-update-wzgwj"]
Jan 06 08:38:26 crc kubenswrapper[4784]: E0106 08:38:26.313284 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 06 08:38:26 crc kubenswrapper[4784]: E0106 08:38:26.313365 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data podName:41c89df0-d35f-4f47-86f3-71a2c0971d79 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:27.313346244 +0000 UTC m=+1409.359519081 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data") pod "rabbitmq-cell1-server-0" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79") : configmap "rabbitmq-cell1-config-data" not found
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.458329 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="2585ada6-db24-4639-9f3b-d52919149935" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.197:6080/vnc_lite.html\": dial tcp 10.217.0.197:6080: connect: connection refused"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.536072 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c39e426-916f-4f00-927d-fad49789b75e" path="/var/lib/kubelet/pods/3c39e426-916f-4f00-927d-fad49789b75e/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.540970 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f6f606e-fa4d-45d8-95a1-04a052ca0dbf" path="/var/lib/kubelet/pods/3f6f606e-fa4d-45d8-95a1-04a052ca0dbf/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.541835 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="441e6fe5-5597-44c2-883e-7279b94a858c" path="/var/lib/kubelet/pods/441e6fe5-5597-44c2-883e-7279b94a858c/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.546965 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cbba6e7-1e02-4788-9e12-00c5c3391a86" path="/var/lib/kubelet/pods/7cbba6e7-1e02-4788-9e12-00c5c3391a86/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.549081 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81b4221b-0db4-4391-a152-951dbe6700cb" path="/var/lib/kubelet/pods/81b4221b-0db4-4391-a152-951dbe6700cb/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.550197 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee0707c4-0382-44eb-b6e9-0a2c076862ae" path="/var/lib/kubelet/pods/ee0707c4-0382-44eb-b6e9-0a2c076862ae/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.550759 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f57eec82-c0b1-41de-8082-a096a3e73acc" path="/var/lib/kubelet/pods/f57eec82-c0b1-41de-8082-a096a3e73acc/volumes"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.552038 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-t2sj4"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.552072 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-t2sj4"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.552091 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.554526 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-57tfg"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.554690 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-57tfg"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.554712 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.554813 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.559009 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.653846 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-7qsrr"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.680672 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pttk\" (UniqueName: \"kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.680883 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.691632 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-7qsrr"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.785621 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pttk\" (UniqueName: \"kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.785802 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.787751 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.797348 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.806738 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-bqm59"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.807036 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-bqm59" podUID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" containerName="openstack-network-exporter" containerID="cri-o://45e04d5527cf93bae10ed29f13197ad0a095715926a5b063cac2ca9387bdb303" gracePeriod=30
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.824731 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pttk\" (UniqueName: \"kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk\") pod \"nova-cell0-7e75-account-create-update-95l2w\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.856410 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8tvjg"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.878619 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-2c71-account-create-update-sgvgl"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.903866 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-2c71-account-create-update-sgvgl"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.926757 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-9bdd7"]
Jan 06 08:38:26 crc kubenswrapper[4784]: E0106 08:38:26.936497 4784 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.102:49670->38.102.83.102:38977: write tcp 38.102.83.102:49670->38.102.83.102:38977: write: broken pipe
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.941916 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.942301 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd" containerID="cri-o://b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" gracePeriod=30
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.943028 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="openstack-network-exporter" containerID="cri-o://3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf" gracePeriod=30
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.968222 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-9bdd7"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.980976 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.982313 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="openstack-network-exporter" containerID="cri-o://85ff42bd642efa429ae58eddee287d22c43869bed771a6ed4057e860b56b4123" gracePeriod=300
Jan 06 08:38:26 crc kubenswrapper[4784]: I0106 08:38:26.993642 4784 generic.go:334] "Generic (PLEG): container finished" podID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" containerID="b858b3f1e24015ec6ea11e61a8507c959309d301f0e42d8b7abf79701332e44f" exitCode=137
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.005890 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.013927 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="openstack-network-exporter" containerID="cri-o://a7e71ce3ce6e75c5e79d87518db38bd414fa98abc85f8580bc7cbfd40aaa0044" gracePeriod=300
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.031387 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-95l2w"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.038975 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.068158 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.069109 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-849db5db7c-vjb4f" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-httpd" containerID="cri-o://b771c84b2fee4dfd30eea462159329d4aa16e7a25274eb645e61e328c8f50840" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.069566 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-849db5db7c-vjb4f" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-api" containerID="cri-o://3dbabc5c6ed9e67890a74683afbfe123a07fdb5e9564f6fd5fe7f8edcd519e4c" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.085661 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-79rk8"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.106876 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.106966 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data podName:052ecaa6-58fd-42ed-b2c5-6b8919470619 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:27.606944124 +0000 UTC m=+1409.653116961 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data") pod "rabbitmq-server-0" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619") : configmap "rabbitmq-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.109867 4784 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-8tvjg" message="Exiting ovn-controller (1) "
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.109914 4784 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-8tvjg" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" containerID="cri-o://18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.109946 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-8tvjg" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" containerID="cri-o://18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.109889 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-79rk8"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.125837 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.126346 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-log" containerID="cri-o://c7a53b94cb251c8ef3e62bbecc07389e6162337f8fbd7425b6a2aa4930128cb4" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.127313 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-httpd" containerID="cri-o://daa8748da271a4d548f8c192fbf0ea343adcd94a49510154aa63c807c38815e3" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.129528 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="ovsdbserver-sb" containerID="cri-o://e1e26be922c57e54539f2aa8adb35b7ddeaf1093c84fe1a99379176abf764f01" gracePeriod=300
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.134152 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="ovsdbserver-nb" containerID="cri-o://2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" gracePeriod=300
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.151634 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-cwjjl"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.171624 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.171982 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="dnsmasq-dns" containerID="cri-o://26a913b7bf6de54902c4672761268a5ffa207b1d5eb9d15f8383c8ef9d0df22f" gracePeriod=10
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.194235 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-cwjjl"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.235601 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.235934 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-log" containerID="cri-o://538fb0d95d0196cf8efa1743e88a29f2ea31f008245abb93870ac9e8829ca9a8" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.236489 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-httpd" containerID="cri-o://54f4dd31065d6db90e2a8b67fbc54756e16347bc95f37040733d2d6e10eb17a1" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.243398 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.243760 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="cinder-scheduler" containerID="cri-o://5d81264728e21f0de5897f691f16a0a600c9fc6f290f4b6c5b4cf42420927001" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.243938 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="probe" containerID="cri-o://254f5436d55af633a01d076f3a43e18f1370e7e15307f5c1d0c79c703303c8f9" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.257703 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.259936 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api-log" containerID="cri-o://9f1c3888df0343f9b1d8b44706fe2ef8ae987e71456454a609fae003c73320e1" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.261057 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api" containerID="cri-o://952523238c0159b22bae45fd0feb984398f4e41c261b86b0227306adbbc37885" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.271458 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.272068 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-server" containerID="cri-o://6113f42bede3a27a0cb54b6277716f1e062747b6f20d0f6170df3e915df36563" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.272829 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-updater" containerID="cri-o://fdfc98f3c2d5c499ec740f96d17f27d1a06a4729300bbfc140e64fc4172f8f42" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273014 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="swift-recon-cron" containerID="cri-o://47cf27a1d579d06ca4f6c0124e1a7218eaa708d4b9f10e03cf5124c1b88d16b7" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273087 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="rsync" containerID="cri-o://4ba7f085163f761a926fc23b3df8baf41bed014e786fa75f064425ec412d6aac" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273146 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-expirer" containerID="cri-o://92c958d9eb2eef729c21a59d1aa5cd51f0a60f0eee60721df4067e8956812f0d" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273198 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-updater" containerID="cri-o://2782e4e954d402d8644c704c14fc8b38760f649dd39ca5a39f52b8e5a86c03a1" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273256 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-auditor" containerID="cri-o://c87da68debafaf9f2a7c72f8afee9751c29efd20b0a9f522e25b46cbb9829297" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273304 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-replicator" containerID="cri-o://7d3b4c93b777e722f9e32993854eee837ecfe60db1b929d1c3283452b9fc478a" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273353 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-server" containerID="cri-o://048d0fdb57c0dd31a395544856c4165cd53c4598bcc0c60dcd03e55e8e8cb6bd" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273425 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-reaper" containerID="cri-o://4e841199dad3d57bec03b6ab32443378e9a39fb3254ba456545d280228b18564" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273488 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-replicator" containerID="cri-o://bf572454575eb77381e725ef9250e47418c50e419d4f2e3931a28d2c8d07717d" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273530 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-server" containerID="cri-o://869eb46b39bba54be94fbd147453143836362ba61706362d4c5f22a8bd537f78" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273605 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-replicator" containerID="cri-o://353a0302ae512e9895ae799f830966d4415b4ddf7909c1b5ffbcba497511d1de" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273652 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-auditor" containerID="cri-o://0410f39e8c4c8bc197844907a33893de7c643c3e9ad4fa4c7538fe09ef8e89e5" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.273110 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-auditor" containerID="cri-o://26e897885f00517a75035b5f5164eb8d210634e06114a25a61554dd7abccebce" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.303045 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-499kt"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.314407 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-499kt"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.385084 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-8ff6v"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.500280 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-8ff6v"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.504567 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.504715 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data podName:41c89df0-d35f-4f47-86f3-71a2c0971d79 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:29.504622671 +0000 UTC m=+1411.550795508 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data") pod "rabbitmq-cell1-server-0" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79") : configmap "rabbitmq-cell1-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.552570 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa is running failed: container process not found" containerID="2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.553214 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa is running failed: container process not found" containerID="2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.553515 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa is running failed: container process not found" containerID="2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.553686 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="ovsdbserver-nb"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.556732 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.557321 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5759d5cbc4-2r87d" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-log" containerID="cri-o://07f60e1630bdcd2399c45169de65854f62b21633e41ebfe502b28d51d39226cc" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.558284 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5759d5cbc4-2r87d" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-api" containerID="cri-o://7edfdbc25e2ee69a5e5b15b59c0c29dcf7e91b34fc5883ef77bb32176ff55cd7" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.633073 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.633173 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data podName:052ecaa6-58fd-42ed-b2c5-6b8919470619 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:28.633147401 +0000 UTC m=+1410.679320228 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data") pod "rabbitmq-server-0" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619") : configmap "rabbitmq-config-data" not found
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.637417 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.646038 4784 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 06 08:38:27 crc kubenswrapper[4784]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 06 08:38:27 crc kubenswrapper[4784]: + source /usr/local/bin/container-scripts/functions
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNBridge=br-int
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNRemote=tcp:localhost:6642
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNEncapType=geneve
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNAvailabilityZones=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ EnableChassisAsGateway=true
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ PhysicalNetworks=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNHostName=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ ovs_dir=/var/lib/openvswitch
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + sleep 0.5
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + cleanup_ovsdb_server_semaphore
Jan 06 08:38:27 crc kubenswrapper[4784]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 06 08:38:27 crc kubenswrapper[4784]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-2n9kz" message=<
Jan 06 08:38:27 crc kubenswrapper[4784]: Exiting ovsdb-server (5) [ OK ]
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 06 08:38:27 crc kubenswrapper[4784]: + source /usr/local/bin/container-scripts/functions
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNBridge=br-int
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNRemote=tcp:localhost:6642
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNEncapType=geneve
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNAvailabilityZones=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ EnableChassisAsGateway=true
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ PhysicalNetworks=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNHostName=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ ovs_dir=/var/lib/openvswitch
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + sleep 0.5
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + cleanup_ovsdb_server_semaphore
Jan 06 08:38:27 crc kubenswrapper[4784]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 06 08:38:27 crc kubenswrapper[4784]: >
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.646104 4784 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 06 08:38:27 crc kubenswrapper[4784]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 06 08:38:27 crc kubenswrapper[4784]: + source /usr/local/bin/container-scripts/functions
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNBridge=br-int
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNRemote=tcp:localhost:6642
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNEncapType=geneve
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNAvailabilityZones=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ EnableChassisAsGateway=true
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ PhysicalNetworks=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ OVNHostName=
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ ovs_dir=/var/lib/openvswitch
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 06 08:38:27 crc kubenswrapper[4784]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + sleep 0.5
Jan 06 08:38:27 crc kubenswrapper[4784]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 06 08:38:27 crc kubenswrapper[4784]: + cleanup_ovsdb_server_semaphore
Jan 06 08:38:27 crc kubenswrapper[4784]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 06 08:38:27 crc kubenswrapper[4784]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 06 08:38:27 crc kubenswrapper[4784]: > pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" containerID="cri-o://d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.646159 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" containerID="cri-o://d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" gracePeriod=30
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.682687 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-6x9vm"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.716851 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-6x9vm"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.744126 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-7tst7"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.769763 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-28c1-account-create-update-fnqkv"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.777828 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.789565 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-7tst7"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.794274 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.801088 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-28c1-account-create-update-fnqkv"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.819787 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" containerID="cri-o://2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" gracePeriod=29
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.823727 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 06 08:38:27 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: if [ -n "barbican" ]; then
Jan 06 08:38:27 crc kubenswrapper[4784]: GRANT_DATABASE="barbican"
Jan 06 08:38:27 crc kubenswrapper[4784]: else
Jan 06 08:38:27 crc kubenswrapper[4784]: GRANT_DATABASE="*"
Jan 06 08:38:27 crc kubenswrapper[4784]: fi
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: # going for maximum compatibility here:
Jan 06 08:38:27 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 06 08:38:27 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 06 08:38:27 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 06 08:38:27 crc kubenswrapper[4784]: # support updates
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.824024 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.826050 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-d356-account-create-update-2prdj" podUID="0a98e0fb-025b-4a3c-8bbb-2a2f20132e38"
Jan 06 08:38:27 crc kubenswrapper[4784]: W0106 08:38:27.844710 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde90cd55_5430_48c7_b519_a4398e495607.slice/crio-a55ac9f5c9cd26f259fb83887e86410eaa008ee5d296ec60d38a21498620f27e WatchSource:0}: Error finding container a55ac9f5c9cd26f259fb83887e86410eaa008ee5d296ec60d38a21498620f27e: Status 404 returned error can't find the container with id a55ac9f5c9cd26f259fb83887e86410eaa008ee5d296ec60d38a21498620f27e
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.851251 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.851356 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerName="nova-cell1-conductor-conductor"
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.851531 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.861277 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.880912 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.884195 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.884262 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd"
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.903158 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 06 08:38:27 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: if [ -n "cinder" ]; then
Jan 06 08:38:27 crc kubenswrapper[4784]: GRANT_DATABASE="cinder"
Jan 06 08:38:27 crc kubenswrapper[4784]: else
Jan 06 08:38:27 crc kubenswrapper[4784]: GRANT_DATABASE="*"
Jan 06 08:38:27 crc kubenswrapper[4784]: fi
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: # going for maximum compatibility here:
Jan 06 08:38:27 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 06 08:38:27 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 06 08:38:27 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 06 08:38:27 crc kubenswrapper[4784]: # support updates
Jan 06 08:38:27 crc kubenswrapper[4784]: 
Jan 06 08:38:27 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError"
Jan 06 08:38:27 crc kubenswrapper[4784]: E0106 08:38:27.904381 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-c78a-account-create-update-zjr59" podUID="de90cd55-5430-48c7-b519-a4398e495607"
Jan 06 08:38:27 crc kubenswrapper[4784]: I0106 08:38:27.917560 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.014639 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.014979 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-log" containerID="cri-o://fd90bc399e2a254f94bbf15631cc9bd41d01a3e85ba8564aafc267bb679d9db4" gracePeriod=30
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.015623 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-metadata" containerID="cri-o://00d276555cdbfb9bcc2cbe375f52631931dd7b8c8efe7fb96c6f9c7ad948b1ab" gracePeriod=30
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.028503 4784 scope.go:117] "RemoveContainer" containerID="b858b3f1e24015ec6ea11e61a8507c959309d301f0e42d8b7abf79701332e44f"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.028729 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.044621 4784 generic.go:334] "Generic (PLEG): container finished" podID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerID="9f1c3888df0343f9b1d8b44706fe2ef8ae987e71456454a609fae003c73320e1" exitCode=143
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.044757 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerDied","Data":"9f1c3888df0343f9b1d8b44706fe2ef8ae987e71456454a609fae003c73320e1"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.054091 4784 generic.go:334] "Generic (PLEG): container finished" podID="e94ed326-8f56-4933-8616-5814505b58f5" containerID="538fb0d95d0196cf8efa1743e88a29f2ea31f008245abb93870ac9e8829ca9a8" exitCode=143
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.054167 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerDied","Data":"538fb0d95d0196cf8efa1743e88a29f2ea31f008245abb93870ac9e8829ca9a8"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.061115 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret\") pod \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") "
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.061185 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config\") pod \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") "
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.061245 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2t6d2\" (UniqueName: \"kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2\") pod \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") "
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.061310 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle\") pod \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\" (UID: \"d1dc9219-aca3-47c5-b8f7-37799235c2a9\") "
Jan 06 08:38:28 crc kubenswrapper[4784]: E0106 08:38:28.083750 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 06 08:38:28 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: if [ -n "" ]; then
Jan 06 08:38:28 crc kubenswrapper[4784]: GRANT_DATABASE=""
Jan 06 08:38:28 crc kubenswrapper[4784]: else
Jan 06 08:38:28 crc kubenswrapper[4784]: GRANT_DATABASE="*"
Jan 06 08:38:28 crc kubenswrapper[4784]: fi
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: # going for maximum compatibility here:
Jan 06 08:38:28 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 06 08:38:28 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 06 08:38:28 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 06 08:38:28 crc kubenswrapper[4784]: # support updates
Jan 06 08:38:28 crc kubenswrapper[4784]: 
Jan 06 08:38:28 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError"
Jan 06 08:38:28 crc kubenswrapper[4784]: E0106 08:38:28.086400 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-sx6jh" podUID="f364fb9d-ca92-487a-9e6f-6d85a97117d0"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095297 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="4ba7f085163f761a926fc23b3df8baf41bed014e786fa75f064425ec412d6aac" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095356 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="92c958d9eb2eef729c21a59d1aa5cd51f0a60f0eee60721df4067e8956812f0d" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095364 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="2782e4e954d402d8644c704c14fc8b38760f649dd39ca5a39f52b8e5a86c03a1" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095373 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="c87da68debafaf9f2a7c72f8afee9751c29efd20b0a9f522e25b46cbb9829297" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095398 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="7d3b4c93b777e722f9e32993854eee837ecfe60db1b929d1c3283452b9fc478a" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095407 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="048d0fdb57c0dd31a395544856c4165cd53c4598bcc0c60dcd03e55e8e8cb6bd" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095414 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="fdfc98f3c2d5c499ec740f96d17f27d1a06a4729300bbfc140e64fc4172f8f42" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095421 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="26e897885f00517a75035b5f5164eb8d210634e06114a25a61554dd7abccebce" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095427 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="bf572454575eb77381e725ef9250e47418c50e419d4f2e3931a28d2c8d07717d" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095434 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="4e841199dad3d57bec03b6ab32443378e9a39fb3254ba456545d280228b18564" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095441 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="0410f39e8c4c8bc197844907a33893de7c643c3e9ad4fa4c7538fe09ef8e89e5" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095447 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="353a0302ae512e9895ae799f830966d4415b4ddf7909c1b5ffbcba497511d1de" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095454 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="6113f42bede3a27a0cb54b6277716f1e062747b6f20d0f6170df3e915df36563" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095521 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"4ba7f085163f761a926fc23b3df8baf41bed014e786fa75f064425ec412d6aac"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095570 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"92c958d9eb2eef729c21a59d1aa5cd51f0a60f0eee60721df4067e8956812f0d"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095583 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"2782e4e954d402d8644c704c14fc8b38760f649dd39ca5a39f52b8e5a86c03a1"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095592 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"c87da68debafaf9f2a7c72f8afee9751c29efd20b0a9f522e25b46cbb9829297"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095604 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"7d3b4c93b777e722f9e32993854eee837ecfe60db1b929d1c3283452b9fc478a"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095613 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"048d0fdb57c0dd31a395544856c4165cd53c4598bcc0c60dcd03e55e8e8cb6bd"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095622 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"fdfc98f3c2d5c499ec740f96d17f27d1a06a4729300bbfc140e64fc4172f8f42"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095631 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"26e897885f00517a75035b5f5164eb8d210634e06114a25a61554dd7abccebce"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095654 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"bf572454575eb77381e725ef9250e47418c50e419d4f2e3931a28d2c8d07717d"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095663 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"4e841199dad3d57bec03b6ab32443378e9a39fb3254ba456545d280228b18564"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095675 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"0410f39e8c4c8bc197844907a33893de7c643c3e9ad4fa4c7538fe09ef8e89e5"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095685 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"353a0302ae512e9895ae799f830966d4415b4ddf7909c1b5ffbcba497511d1de"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.095696 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"6113f42bede3a27a0cb54b6277716f1e062747b6f20d0f6170df3e915df36563"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.100031 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-4b287"]
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.101526 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2" (OuterVolumeSpecName: "kube-api-access-2t6d2") pod "d1dc9219-aca3-47c5-b8f7-37799235c2a9" (UID: "d1dc9219-aca3-47c5-b8f7-37799235c2a9"). InnerVolumeSpecName "kube-api-access-2t6d2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.124435 4784 generic.go:334] "Generic (PLEG): container finished" podID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.124529 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerDied","Data":"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.145811 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1dc9219-aca3-47c5-b8f7-37799235c2a9" (UID: "d1dc9219-aca3-47c5-b8f7-37799235c2a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.145924 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-4b287"]
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.147192 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-bqm59_5b70b310-f1bb-4b3b-b679-9c11f98367ee/openstack-network-exporter/0.log"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.147294 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-bqm59"
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.148604 4784 generic.go:334] "Generic (PLEG): container finished" podID="1c98eb91-7877-4dd7-b694-52b017726242" containerID="3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf" exitCode=2
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.148692 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerDied","Data":"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.159404 4784 generic.go:334] "Generic (PLEG): container finished" podID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerID="b771c84b2fee4dfd30eea462159329d4aa16e7a25274eb645e61e328c8f50840" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.159490 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-849db5db7c-vjb4f" event={"ID":"bed6a7b9-0069-4ea7-b813-70a5808d18db","Type":"ContainerDied","Data":"b771c84b2fee4dfd30eea462159329d4aa16e7a25274eb645e61e328c8f50840"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.166057 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "d1dc9219-aca3-47c5-b8f7-37799235c2a9" (UID: "d1dc9219-aca3-47c5-b8f7-37799235c2a9"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.173115 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2t6d2\" (UniqueName: \"kubernetes.io/projected/d1dc9219-aca3-47c5-b8f7-37799235c2a9-kube-api-access-2t6d2\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.174781 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.175031 4784 generic.go:334] "Generic (PLEG): container finished" podID="2585ada6-db24-4639-9f3b-d52919149935" containerID="fea8e4389874b9bc56d905c5919e4d5d64c1dda16388208e5bca742ee7cd64e7" exitCode=0
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.175116 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2585ada6-db24-4639-9f3b-d52919149935","Type":"ContainerDied","Data":"fea8e4389874b9bc56d905c5919e4d5d64c1dda16388208e5bca742ee7cd64e7"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.180183 4784 generic.go:334] "Generic (PLEG): container finished" podID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerID="c7a53b94cb251c8ef3e62bbecc07389e6162337f8fbd7425b6a2aa4930128cb4" exitCode=143
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.180259 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84c65dd87b-gpr7l"]
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.180352 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerDied","Data":"c7a53b94cb251c8ef3e62bbecc07389e6162337f8fbd7425b6a2aa4930128cb4"}
Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.180719 
4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-84c65dd87b-gpr7l" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api-log" containerID="cri-o://fb3044be153df30f3ff3fd00d5cb70d02783bb32a511ab902bb2119d0727fe42" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.180757 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-84c65dd87b-gpr7l" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api" containerID="cri-o://6fffca0776ab46872f3102c7fc0a42be2362bb53a3780f33a08136d43b9b8eae" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.193295 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.193698 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-55f595f44f-tzkkl" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker-log" containerID="cri-o://1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.193969 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-55f595f44f-tzkkl" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker" containerID="cri-o://1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.203246 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_700a4853-cbd0-4cc6-8322-d9296caadf34/ovsdbserver-nb/0.log" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.203322 4784 generic.go:334] "Generic (PLEG): container finished" podID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerID="a7e71ce3ce6e75c5e79d87518db38bd414fa98abc85f8580bc7cbfd40aaa0044" exitCode=2 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.203339 4784 generic.go:334] "Generic (PLEG): container finished" podID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerID="2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" exitCode=143 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.204040 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.204064 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "d1dc9219-aca3-47c5-b8f7-37799235c2a9" (UID: "d1dc9219-aca3-47c5-b8f7-37799235c2a9"). InnerVolumeSpecName "openstack-config-secret". 
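
The gracePeriod=30 in the "Killing container with a grace period" entries above is the window the kubelet honors between sending the container its stop signal and forcing a kill; it normally comes from the pod's terminationGracePeriodSeconds, unless an override was passed with the delete request. A sketch of how to read the configured value back with standard kubectl (illustrative, not from this log):

    kubectl -n openstack get pod barbican-api-84c65dd87b-gpr7l -o jsonpath='{.spec.terminationGracePeriodSeconds}'
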
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.204105 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerDied","Data":"a7e71ce3ce6e75c5e79d87518db38bd414fa98abc85f8580bc7cbfd40aaa0044"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.204144 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerDied","Data":"2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.205464 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener-log" containerID="cri-o://18aae47178054e0d5a91d219ad4b268e6837bc33b481c8e49e5de6f2ab09b27f" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.205647 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener" containerID="cri-o://e8dea317ca214f9ea6144a057e6bb7ef38cd17e2a3566ae30882d733e82bb07b" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.226705 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ce785668-f5b3-4be6-b466-d1041d0190d1/ovsdbserver-sb/0.log" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.226777 4784 generic.go:334] "Generic (PLEG): container finished" podID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerID="85ff42bd642efa429ae58eddee287d22c43869bed771a6ed4057e860b56b4123" exitCode=2 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.226797 4784 generic.go:334] "Generic (PLEG): container finished" podID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerID="e1e26be922c57e54539f2aa8adb35b7ddeaf1093c84fe1a99379176abf764f01" exitCode=143 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.226871 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerDied","Data":"85ff42bd642efa429ae58eddee287d22c43869bed771a6ed4057e860b56b4123"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.226931 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerDied","Data":"e1e26be922c57e54539f2aa8adb35b7ddeaf1093c84fe1a99379176abf764f01"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.236445 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c30b-account-create-update-lssn9"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.236902 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-zjr59" event={"ID":"de90cd55-5430-48c7-b519-a4398e495607","Type":"ContainerStarted","Data":"a55ac9f5c9cd26f259fb83887e86410eaa008ee5d296ec60d38a21498620f27e"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.250222 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c30b-account-create-update-lssn9"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.254729 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-db-create-n9rlv"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.258031 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-bqm59_5b70b310-f1bb-4b3b-b679-9c11f98367ee/openstack-network-exporter/0.log" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.258083 4784 generic.go:334] "Generic (PLEG): container finished" podID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" containerID="45e04d5527cf93bae10ed29f13197ad0a095715926a5b063cac2ca9387bdb303" exitCode=2 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.258170 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-bqm59" event={"ID":"5b70b310-f1bb-4b3b-b679-9c11f98367ee","Type":"ContainerDied","Data":"45e04d5527cf93bae10ed29f13197ad0a095715926a5b063cac2ca9387bdb303"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.258228 4784 scope.go:117] "RemoveContainer" containerID="45e04d5527cf93bae10ed29f13197ad0a095715926a5b063cac2ca9387bdb303" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.258273 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-bqm59" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.260038 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-2prdj" event={"ID":"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38","Type":"ContainerStarted","Data":"2acc1cdac69071e461e9e2ecd49fad8422a18ad8e39dd5342f45caa350e8fd07"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.262663 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="galera" containerID="cri-o://a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.269695 4784 generic.go:334] "Generic (PLEG): container finished" podID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerID="26a913b7bf6de54902c4672761268a5ffa207b1d5eb9d15f8383c8ef9d0df22f" exitCode=0 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.269780 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" event={"ID":"8e67aeba-582a-470f-a40f-e1def33f01d2","Type":"ContainerDied","Data":"26a913b7bf6de54902c4672761268a5ffa207b1d5eb9d15f8383c8ef9d0df22f"} Jan 06 08:38:28 crc kubenswrapper[4784]: E0106 08:38:28.271294 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:38:28 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: if [ -n "barbican" ]; then Jan 06 08:38:28 crc kubenswrapper[4784]: GRANT_DATABASE="barbican" Jan 06 08:38:28 crc kubenswrapper[4784]: else Jan 06 08:38:28 crc 
kubenswrapper[4784]: GRANT_DATABASE="*" Jan 06 08:38:28 crc kubenswrapper[4784]: fi Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: # going for maximum compatibility here: Jan 06 08:38:28 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 06 08:38:28 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 06 08:38:28 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 06 08:38:28 crc kubenswrapper[4784]: # support updates Jan 06 08:38:28 crc kubenswrapper[4784]: Jan 06 08:38:28 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError" Jan 06 08:38:28 crc kubenswrapper[4784]: E0106 08:38:28.272477 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-d356-account-create-update-2prdj" podUID="0a98e0fb-025b-4a3c-8bbb-2a2f20132e38" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.277410 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.277510 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.278360 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "ovs-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.278486 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.279134 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.279219 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8pvf\" (UniqueName: \"kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.279304 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config\") pod \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\" (UID: \"5b70b310-f1bb-4b3b-b679-9c11f98367ee\") " Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.280020 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.280269 4784 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.280300 4784 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1dc9219-aca3-47c5-b8f7-37799235c2a9-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.280313 4784 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.281298 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config" (OuterVolumeSpecName: "config") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.286715 4784 generic.go:334] "Generic (PLEG): container finished" podID="0ef35db6-a440-4394-a26f-750a29488828" containerID="18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9" exitCode=0 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.286864 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg" event={"ID":"0ef35db6-a440-4394-a26f-750a29488828","Type":"ContainerDied","Data":"18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.290177 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.290534 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-httpd" containerID="cri-o://8885e8dca6c2747b87500b9c0f62e03258c8d67a971af2cba425f6a3b53371bf" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.290755 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-server" containerID="cri-o://bc9bbb81f969e4ccb4a50e67c4b725fd7aaad864e06456377517ca86818bce99" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.295761 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf" (OuterVolumeSpecName: "kube-api-access-n8pvf") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "kube-api-access-n8pvf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.312042 4784 generic.go:334] "Generic (PLEG): container finished" podID="6cd2b801-83a4-410f-a555-8dfda270713a" containerID="07f60e1630bdcd2399c45169de65854f62b21633e41ebfe502b28d51d39226cc" exitCode=143 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.312111 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerDied","Data":"07f60e1630bdcd2399c45169de65854f62b21633e41ebfe502b28d51d39226cc"} Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.361686 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="036cffd0-4911-4b85-b573-5aefd8bd124a" path="/var/lib/kubelet/pods/036cffd0-4911-4b85-b573-5aefd8bd124a/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.362774 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dd652b3-9755-47c0-a4cc-c39c86d840f3" path="/var/lib/kubelet/pods/0dd652b3-9755-47c0-a4cc-c39c86d840f3/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.363443 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c994335-9c58-474f-8192-040dfb912747" path="/var/lib/kubelet/pods/1c994335-9c58-474f-8192-040dfb912747/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.364750 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3da71d4b-c3fe-4b9e-b6a7-63ababb2632c" path="/var/lib/kubelet/pods/3da71d4b-c3fe-4b9e-b6a7-63ababb2632c/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.365438 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45abd702-0d1f-4e81-b043-9cbd8ed1591b" path="/var/lib/kubelet/pods/45abd702-0d1f-4e81-b043-9cbd8ed1591b/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.366225 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="778d4ca4-6cef-45a6-8870-657c2c578797" path="/var/lib/kubelet/pods/778d4ca4-6cef-45a6-8870-657c2c578797/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.366953 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a172939-8d39-40a2-9b41-1eee48de7be5" path="/var/lib/kubelet/pods/7a172939-8d39-40a2-9b41-1eee48de7be5/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.368491 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bb037f9-c2c5-4d71-af96-2c1ce93f720a" path="/var/lib/kubelet/pods/8bb037f9-c2c5-4d71-af96-2c1ce93f720a/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.369290 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93749da5-57fd-4761-aa84-95c9cec12e52" path="/var/lib/kubelet/pods/93749da5-57fd-4761-aa84-95c9cec12e52/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.370855 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a29c392c-7fa2-4a80-b072-92b8201616b8" path="/var/lib/kubelet/pods/a29c392c-7fa2-4a80-b072-92b8201616b8/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.371923 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7af75ef-c428-4d9b-8887-4576bc478e80" path="/var/lib/kubelet/pods/a7af75ef-c428-4d9b-8887-4576bc478e80/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.373040 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4311b21-f94c-4300-aaa2-dd1cea584334" 
path="/var/lib/kubelet/pods/b4311b21-f94c-4300-aaa2-dd1cea584334/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.373936 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1dc9219-aca3-47c5-b8f7-37799235c2a9" path="/var/lib/kubelet/pods/d1dc9219-aca3-47c5-b8f7-37799235c2a9/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.375514 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9ede4c2-499b-4c2c-8aa5-11344298ebae" path="/var/lib/kubelet/pods/e9ede4c2-499b-4c2c-8aa5-11344298ebae/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.376395 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f091be74-b0f9-4291-9202-20e877c55b30" path="/var/lib/kubelet/pods/f091be74-b0f9-4291-9202-20e877c55b30/volumes" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.382272 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5b70b310-f1bb-4b3b-b679-9c11f98367ee-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.382318 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8pvf\" (UniqueName: \"kubernetes.io/projected/5b70b310-f1bb-4b3b-b679-9c11f98367ee-kube-api-access-n8pvf\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.382331 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b70b310-f1bb-4b3b-b679-9c11f98367ee-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.385958 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-n9rlv"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386010 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386032 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386049 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-r2bgl"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386063 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-r2bgl"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386366 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-log" containerID="cri-o://501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.386811 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-api" containerID="cri-o://7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217" gracePeriod=30 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.387604 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.463830 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-8xwm9"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.475216 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-api-db-create-8xwm9"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.475396 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.482127 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.483963 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.496803 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-kxmhp"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.500012 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="rabbitmq" containerID="cri-o://d32e01d069e2a7fe432e20265968b48ead1ba6a001b6421c4e55bfdf12b10616" gracePeriod=604800 Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.507797 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:28 crc kubenswrapper[4784]: I0106 08:38:28.549264 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "5b70b310-f1bb-4b3b-b679-9c11f98367ee" (UID: "5b70b310-f1bb-4b3b-b679-9c11f98367ee"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:28.637293 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:28.641930 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data podName:052ecaa6-58fd-42ed-b2c5-6b8919470619 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:30.64188171 +0000 UTC m=+1412.688054547 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data") pod "rabbitmq-server-0" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619") : configmap "rabbitmq-config-data" not found Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.637521 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b70b310-f1bb-4b3b-b679-9c11f98367ee-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.686638 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.761639 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-kxmhp"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.775333 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.797671 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.809695 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.821793 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.831042 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.837913 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p6pg6"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.847753 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p6pg6"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.855648 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.855931 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" containerName="nova-cell0-conductor-conductor" containerID="cri-o://c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" gracePeriod=30 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.868193 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2wpc6"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.879666 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2wpc6"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.890871 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.891135 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d1e87443-2d75-4063-934c-dc593d03987c" containerName="nova-scheduler-scheduler" containerID="cri-o://69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" gracePeriod=30 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.932238 4784 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.950042 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_700a4853-cbd0-4cc6-8322-d9296caadf34/ovsdbserver-nb/0.log" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.950141 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.955122 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="rabbitmq" containerID="cri-o://f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc" gracePeriod=604800 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961041 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961111 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfc58\" (UniqueName: \"kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961272 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961333 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.961354 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb\") pod \"8e67aeba-582a-470f-a40f-e1def33f01d2\" (UID: \"8e67aeba-582a-470f-a40f-e1def33f01d2\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.971027 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.975816 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-bqm59"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.987419 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58" (OuterVolumeSpecName: "kube-api-access-qfc58") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "kube-api-access-qfc58". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:28.993067 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-bqm59"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.031530 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ce785668-f5b3-4be6-b466-d1041d0190d1/ovsdbserver-sb/0.log" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.031704 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.053466 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.057573 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config" (OuterVolumeSpecName: "config") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.063360 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.063439 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.063475 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hx2j\" (UniqueName: \"kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.063498 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.063562 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066717 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066769 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066798 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066834 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066837 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config" (OuterVolumeSpecName: "config") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066868 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066921 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066971 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.066988 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.067037 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qbxq\" (UniqueName: \"kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq\") pod \"0ef35db6-a440-4394-a26f-750a29488828\" (UID: \"0ef35db6-a440-4394-a26f-750a29488828\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.067060 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs\") pod \"700a4853-cbd0-4cc6-8322-d9296caadf34\" (UID: \"700a4853-cbd0-4cc6-8322-d9296caadf34\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068306 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068360 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run" (OuterVolumeSpecName: "var-run") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068390 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068889 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068907 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfc58\" (UniqueName: \"kubernetes.io/projected/8e67aeba-582a-470f-a40f-e1def33f01d2-kube-api-access-qfc58\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.068918 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.069765 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.071714 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.071906 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.075527 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts" (OuterVolumeSpecName: "scripts") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.076629 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts" (OuterVolumeSpecName: "scripts") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.079211 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.081721 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j" (OuterVolumeSpecName: "kube-api-access-2hx2j") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "kube-api-access-2hx2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.082252 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq" (OuterVolumeSpecName: "kube-api-access-8qbxq") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "kube-api-access-8qbxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.082596 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.101989 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.102086 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d1e87443-2d75-4063-934c-dc593d03987c" containerName="nova-scheduler-scheduler" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.107320 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.110825 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.112035 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.112772 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.136318 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8e67aeba-582a-470f-a40f-e1def33f01d2" (UID: "8e67aeba-582a-470f-a40f-e1def33f01d2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.168853 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "0ef35db6-a440-4394-a26f-750a29488828" (UID: "0ef35db6-a440-4394-a26f-750a29488828"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.169813 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data\") pod \"2585ada6-db24-4639-9f3b-d52919149935\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.169879 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.169981 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170046 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170160 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170192 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") pod \"2585ada6-db24-4639-9f3b-d52919149935\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170250 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170283 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n9sb\" (UniqueName: \"kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170322 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: \"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170356 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir\") pod \"ce785668-f5b3-4be6-b466-d1041d0190d1\" (UID: 
\"ce785668-f5b3-4be6-b466-d1041d0190d1\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170422 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs\") pod \"2585ada6-db24-4639-9f3b-d52919149935\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170469 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs\") pod \"2585ada6-db24-4639-9f3b-d52919149935\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.170530 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle\") pod \"2585ada6-db24-4639-9f3b-d52919149935\" (UID: \"2585ada6-db24-4639-9f3b-d52919149935\") " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.171320 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts" (OuterVolumeSpecName: "scripts") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.171558 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config" (OuterVolumeSpecName: "config") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172132 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172168 4784 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172181 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qbxq\" (UniqueName: \"kubernetes.io/projected/0ef35db6-a440-4394-a26f-750a29488828-kube-api-access-8qbxq\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172193 4784 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172243 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172255 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hx2j\" (UniqueName: \"kubernetes.io/projected/700a4853-cbd0-4cc6-8322-d9296caadf34-kube-api-access-2hx2j\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172265 4784 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172274 4784 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0ef35db6-a440-4394-a26f-750a29488828-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172283 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172292 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172301 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172328 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e67aeba-582a-470f-a40f-e1def33f01d2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172338 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/700a4853-cbd0-4cc6-8322-d9296caadf34-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc 
kubenswrapper[4784]: I0106 08:38:29.172346 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ef35db6-a440-4394-a26f-750a29488828-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172355 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172363 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce785668-f5b3-4be6-b466-d1041d0190d1-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.172371 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0ef35db6-a440-4394-a26f-750a29488828-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.173822 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.176352 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.177849 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr" (OuterVolumeSpecName: "kube-api-access-hz2gr") pod "2585ada6-db24-4639-9f3b-d52919149935" (UID: "2585ada6-db24-4639-9f3b-d52919149935"). InnerVolumeSpecName "kube-api-access-hz2gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.198992 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb" (OuterVolumeSpecName: "kube-api-access-6n9sb") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "kube-api-access-6n9sb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.206812 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.209004 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.233123 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2585ada6-db24-4639-9f3b-d52919149935" (UID: "2585ada6-db24-4639-9f3b-d52919149935"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.248258 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.169:8080/healthcheck\": dial tcp 10.217.0.169:8080: connect: connection refused" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.248317 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.169:8080/healthcheck\": dial tcp 10.217.0.169:8080: connect: connection refused" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.269436 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data" (OuterVolumeSpecName: "config-data") pod "2585ada6-db24-4639-9f3b-d52919149935" (UID: "2585ada6-db24-4639-9f3b-d52919149935"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.271198 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.281990 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "700a4853-cbd0-4cc6-8322-d9296caadf34" (UID: "700a4853-cbd0-4cc6-8322-d9296caadf34"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282241 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282267 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282300 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282314 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz2gr\" (UniqueName: \"kubernetes.io/projected/2585ada6-db24-4639-9f3b-d52919149935-kube-api-access-hz2gr\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282328 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n9sb\" (UniqueName: \"kubernetes.io/projected/ce785668-f5b3-4be6-b466-d1041d0190d1-kube-api-access-6n9sb\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282341 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282354 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/700a4853-cbd0-4cc6-8322-d9296caadf34-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282366 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282378 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.282389 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.293816 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "2585ada6-db24-4639-9f3b-d52919149935" (UID: "2585ada6-db24-4639-9f3b-d52919149935"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.308620 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.311637 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "2585ada6-db24-4639-9f3b-d52919149935" (UID: "2585ada6-db24-4639-9f3b-d52919149935"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.328774 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.329214 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ce785668-f5b3-4be6-b466-d1041d0190d1/ovsdbserver-sb/0.log" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.329363 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.329370 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ce785668-f5b3-4be6-b466-d1041d0190d1","Type":"ContainerDied","Data":"1b8c82e0d4727bf379a7fad1966e194ec6268615fe0db644593629019274a94f"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.329448 4784 scope.go:117] "RemoveContainer" containerID="85ff42bd642efa429ae58eddee287d22c43869bed771a6ed4057e860b56b4123" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.335391 4784 generic.go:334] "Generic (PLEG): container finished" podID="19f8ed37-5996-433b-9915-97489c1d8f11" containerID="501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da" exitCode=143 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.335471 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerDied","Data":"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.345172 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-sx6jh" event={"ID":"f364fb9d-ca92-487a-9e6f-6d85a97117d0","Type":"ContainerStarted","Data":"f36fb7f267e93c7667782d8010e59e423efd806358dba3656169e514e93d72a2"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.345761 4784 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/root-account-create-update-sx6jh" secret="" err="secret \"galera-openstack-cell1-dockercfg-hqzpp\" not found" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.349157 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:38:29 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: if [ -n "" ]; then Jan 06 08:38:29 crc kubenswrapper[4784]: GRANT_DATABASE="" Jan 06 08:38:29 crc kubenswrapper[4784]: else Jan 06 08:38:29 crc kubenswrapper[4784]: GRANT_DATABASE="*" Jan 06 08:38:29 crc kubenswrapper[4784]: fi Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: # going for maximum compatibility here: Jan 06 08:38:29 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 06 08:38:29 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 06 08:38:29 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 06 08:38:29 crc kubenswrapper[4784]: # support updates Jan 06 08:38:29 crc kubenswrapper[4784]: Jan 06 08:38:29 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.350254 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-sx6jh" podUID="f364fb9d-ca92-487a-9e6f-6d85a97117d0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.350929 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2585ada6-db24-4639-9f3b-d52919149935","Type":"ContainerDied","Data":"3a704d27ae2f01d7db2bb0f948fd9bf68d3f5ec6d06280fb5e4ed4e0bb1d5730"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.350988 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.354455 4784 generic.go:334] "Generic (PLEG): container finished" podID="79cac775-c143-4370-bf3b-b25e2ca62120" containerID="fd90bc399e2a254f94bbf15631cc9bd41d01a3e85ba8564aafc267bb679d9db4" exitCode=143 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.354568 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerDied","Data":"fd90bc399e2a254f94bbf15631cc9bd41d01a3e85ba8564aafc267bb679d9db4"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.373678 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "ce785668-f5b3-4be6-b466-d1041d0190d1" (UID: "ce785668-f5b3-4be6-b466-d1041d0190d1"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.375388 4784 scope.go:117] "RemoveContainer" containerID="e1e26be922c57e54539f2aa8adb35b7ddeaf1093c84fe1a99379176abf764f01" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.380356 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" event={"ID":"8e67aeba-582a-470f-a40f-e1def33f01d2","Type":"ContainerDied","Data":"a477e09c75e922220ce7f9da468f75feb10fd91005ee8a0aa7b8159d7e9c6933"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.380441 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389498 4784 generic.go:334] "Generic (PLEG): container finished" podID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerID="1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28" exitCode=143 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389601 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerDied","Data":"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389931 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389959 4784 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389973 4784 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2585ada6-db24-4639-9f3b-d52919149935-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.389989 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.390002 4784 reconciler_common.go:293] "Volume detached for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce785668-f5b3-4be6-b466-d1041d0190d1-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.390076 4784 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.390138 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts podName:f364fb9d-ca92-487a-9e6f-6d85a97117d0 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:29.89011384 +0000 UTC m=+1411.936286677 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts") pod "root-account-create-update-sx6jh" (UID: "f364fb9d-ca92-487a-9e6f-6d85a97117d0") : configmap "openstack-cell1-scripts" not found Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.397718 4784 generic.go:334] "Generic (PLEG): container finished" podID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerID="bc9bbb81f969e4ccb4a50e67c4b725fd7aaad864e06456377517ca86818bce99" exitCode=0 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.397753 4784 generic.go:334] "Generic (PLEG): container finished" podID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerID="8885e8dca6c2747b87500b9c0f62e03258c8d67a971af2cba425f6a3b53371bf" exitCode=0 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.397805 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerDied","Data":"bc9bbb81f969e4ccb4a50e67c4b725fd7aaad864e06456377517ca86818bce99"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.397842 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerDied","Data":"8885e8dca6c2747b87500b9c0f62e03258c8d67a971af2cba425f6a3b53371bf"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.428840 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.445881 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.447840 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"869eb46b39bba54be94fbd147453143836362ba61706362d4c5f22a8bd537f78"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.447386 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="869eb46b39bba54be94fbd147453143836362ba61706362d4c5f22a8bd537f78" exitCode=0 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.455093 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.456470 4784 generic.go:334] "Generic (PLEG): container finished" podID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerID="18aae47178054e0d5a91d219ad4b268e6837bc33b481c8e49e5de6f2ab09b27f" exitCode=143 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.456569 4784 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerDied","Data":"18aae47178054e0d5a91d219ad4b268e6837bc33b481c8e49e5de6f2ab09b27f"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.458009 4784 scope.go:117] "RemoveContainer" containerID="fea8e4389874b9bc56d905c5919e4d5d64c1dda16388208e5bca742ee7cd64e7" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.465252 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-xt7gs"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.474423 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8tvjg" event={"ID":"0ef35db6-a440-4394-a26f-750a29488828","Type":"ContainerDied","Data":"1b54fdb7264ff641fcf407364676075ad525b00b1f338e708bcc2088e2b91304"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.474527 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8tvjg" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.505241 4784 generic.go:334] "Generic (PLEG): container finished" podID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerID="fb3044be153df30f3ff3fd00d5cb70d02783bb32a511ab902bb2119d0727fe42" exitCode=143 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.505490 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerDied","Data":"fb3044be153df30f3ff3fd00d5cb70d02783bb32a511ab902bb2119d0727fe42"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.524623 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_700a4853-cbd0-4cc6-8322-d9296caadf34/ovsdbserver-nb/0.log" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.525024 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"700a4853-cbd0-4cc6-8322-d9296caadf34","Type":"ContainerDied","Data":"63af77200422878343a76de44c720e0eefbed1145e23a334095415ffe6523383"} Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.525224 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.533659 4784 generic.go:334] "Generic (PLEG): container finished" podID="162189cc-1d37-4526-b83c-f36183f40b49" containerID="254f5436d55af633a01d076f3a43e18f1370e7e15307f5c1d0c79c703303c8f9" exitCode=0 Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.533780 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerDied","Data":"254f5436d55af633a01d076f3a43e18f1370e7e15307f5c1d0c79c703303c8f9"} Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.596306 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.596394 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data podName:41c89df0-d35f-4f47-86f3-71a2c0971d79 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:33.596364794 +0000 UTC m=+1415.642537691 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data") pod "rabbitmq-cell1-server-0" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79") : configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.640692 4784 scope.go:117] "RemoveContainer" containerID="26a913b7bf6de54902c4672761268a5ffa207b1d5eb9d15f8383c8ef9d0df22f" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.693796 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8tvjg"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.705393 4784 scope.go:117] "RemoveContainer" containerID="4dcefed887e3875bfae2ad8c88e28b036487507c0804663771f7066b0d7cddac" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.749893 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8tvjg"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.764399 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.772833 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.784179 4784 scope.go:117] "RemoveContainer" containerID="18606de9cd379cec6d03e611706882cefcfe82d03d59dd6bd082573a183009d9" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.788336 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.802587 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.858925 4784 scope.go:117] "RemoveContainer" containerID="a7e71ce3ce6e75c5e79d87518db38bd414fa98abc85f8580bc7cbfd40aaa0044" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.912460 4784 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.912571 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts podName:f364fb9d-ca92-487a-9e6f-6d85a97117d0 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:30.91253473 +0000 UTC m=+1412.958707567 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts") pod "root-account-create-update-sx6jh" (UID: "f364fb9d-ca92-487a-9e6f-6d85a97117d0") : configmap "openstack-cell1-scripts" not found Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.965894 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-n9mzl"] Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966449 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="ovsdbserver-nb" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966472 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="ovsdbserver-nb" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966483 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2585ada6-db24-4639-9f3b-d52919149935" containerName="nova-cell1-novncproxy-novncproxy" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966491 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="2585ada6-db24-4639-9f3b-d52919149935" containerName="nova-cell1-novncproxy-novncproxy" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966505 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966515 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966529 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="ovsdbserver-sb" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966535 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="ovsdbserver-sb" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966568 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966576 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966591 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966597 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966611 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="init" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966618 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="init" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966631 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 
08:38:29.966637 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: E0106 08:38:29.966663 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="dnsmasq-dns" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966668 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="dnsmasq-dns" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966856 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ef35db6-a440-4394-a26f-750a29488828" containerName="ovn-controller" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966873 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="ovsdbserver-sb" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966886 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="dnsmasq-dns" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966898 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966906 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="2585ada6-db24-4639-9f3b-d52919149935" containerName="nova-cell1-novncproxy-novncproxy" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966915 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="ovsdbserver-nb" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966925 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.966931 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" containerName="openstack-network-exporter" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.967708 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.970210 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.982716 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-n9mzl"] Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.986886 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c78a-account-create-update-zjr59" Jan 06 08:38:29 crc kubenswrapper[4784]: I0106 08:38:29.998364 4784 scope.go:117] "RemoveContainer" containerID="2df75ac1bbb78adf7fae00bb5912e0ed3ad738ffa37d0df9b3d4f689510b22fa" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.012971 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdcgk\" (UniqueName: \"kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk\") pod \"de90cd55-5430-48c7-b519-a4398e495607\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.013104 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts\") pod \"de90cd55-5430-48c7-b519-a4398e495607\" (UID: \"de90cd55-5430-48c7-b519-a4398e495607\") " Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.013325 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85mml\" (UniqueName: \"kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.013411 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.015569 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "de90cd55-5430-48c7-b519-a4398e495607" (UID: "de90cd55-5430-48c7-b519-a4398e495607"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.021028 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk" (OuterVolumeSpecName: "kube-api-access-fdcgk") pod "de90cd55-5430-48c7-b519-a4398e495607" (UID: "de90cd55-5430-48c7-b519-a4398e495607"). InnerVolumeSpecName "kube-api-access-fdcgk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.116789 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85mml\" (UniqueName: \"kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.116885 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.116971 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdcgk\" (UniqueName: \"kubernetes.io/projected/de90cd55-5430-48c7-b519-a4398e495607-kube-api-access-fdcgk\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.116996 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/de90cd55-5430-48c7-b519-a4398e495607-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.118362 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.147495 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85mml\" (UniqueName: \"kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml\") pod \"root-account-create-update-n9mzl\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.191697 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.202633 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"] Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.202861 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.229372 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:38:30 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: if [ -n "nova_api" ]; then Jan 06 08:38:30 crc kubenswrapper[4784]: GRANT_DATABASE="nova_api" Jan 06 08:38:30 crc kubenswrapper[4784]: else Jan 06 08:38:30 crc kubenswrapper[4784]: GRANT_DATABASE="*" Jan 06 08:38:30 crc kubenswrapper[4784]: fi Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: # going for maximum compatibility here: Jan 06 08:38:30 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 06 08:38:30 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 06 08:38:30 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 06 08:38:30 crc kubenswrapper[4784]: # support updates Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.230160 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d356-account-create-update-2prdj" Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.231219 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-361f-account-create-update-94fnx" podUID="b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a" Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.243905 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:38:30 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: if [ -n "placement" ]; then Jan 06 08:38:30 crc kubenswrapper[4784]: GRANT_DATABASE="placement" Jan 06 08:38:30 crc kubenswrapper[4784]: else Jan 06 08:38:30 crc kubenswrapper[4784]: GRANT_DATABASE="*" Jan 06 08:38:30 crc kubenswrapper[4784]: fi Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: # going for maximum compatibility here: Jan 06 08:38:30 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 06 08:38:30 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 06 08:38:30 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 06 08:38:30 crc kubenswrapper[4784]: # support updates Jan 06 08:38:30 crc kubenswrapper[4784]: Jan 06 08:38:30 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError" Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.246111 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-c594-account-create-update-qhcs2" podUID="11de2666-488c-4ccd-8d33-37e9e957a8c8" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.250157 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.310789 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-n9mzl"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319006 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319386 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mf8zg\" (UniqueName: \"kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319595 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319657 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319817 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.319835 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.320052 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.320425 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.320501 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config\") pod \"32e811b4-672d-4aa2-905b-9406f594be5c\" (UID: \"32e811b4-672d-4aa2-905b-9406f594be5c\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321102 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321138 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321168 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321463 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-generated\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321504 4784 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-kolla-config\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321518 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.321531 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/32e811b4-672d-4aa2-905b-9406f594be5c-config-data-default\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.329893 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg" (OuterVolumeSpecName: "kube-api-access-mf8zg") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "kube-api-access-mf8zg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.331200 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.178:9292/healthcheck\": read tcp 10.217.0.2:51230->10.217.0.178:9292: read: connection reset by peer"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.331422 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.178:9292/healthcheck\": read tcp 10.217.0.2:51216->10.217.0.178:9292: read: connection reset by peer"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.353370 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c4c22ba-056d-49c3-94ba-a9847f419943" path="/var/lib/kubelet/pods/0c4c22ba-056d-49c3-94ba-a9847f419943/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.353955 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ef35db6-a440-4394-a26f-750a29488828" path="/var/lib/kubelet/pods/0ef35db6-a440-4394-a26f-750a29488828/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.354652 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2585ada6-db24-4639-9f3b-d52919149935" path="/var/lib/kubelet/pods/2585ada6-db24-4639-9f3b-d52919149935/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.355737 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b70b310-f1bb-4b3b-b679-9c11f98367ee" path="/var/lib/kubelet/pods/5b70b310-f1bb-4b3b-b679-9c11f98367ee/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.357136 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dce1cc6-db1c-4b61-adfe-a20a3751aad3" path="/var/lib/kubelet/pods/5dce1cc6-db1c-4b61-adfe-a20a3751aad3/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.360172 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60288dfb-94e1-4aef-a67c-3ad3d457d124" path="/var/lib/kubelet/pods/60288dfb-94e1-4aef-a67c-3ad3d457d124/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.362501 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="700a4853-cbd0-4cc6-8322-d9296caadf34" path="/var/lib/kubelet/pods/700a4853-cbd0-4cc6-8322-d9296caadf34/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.363122 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="776ae843-cb1c-4edd-9cb1-e7a9513e9aa5" path="/var/lib/kubelet/pods/776ae843-cb1c-4edd-9cb1-e7a9513e9aa5/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.364128 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" path="/var/lib/kubelet/pods/8e67aeba-582a-470f-a40f-e1def33f01d2/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.364922 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce785668-f5b3-4be6-b466-d1041d0190d1" path="/var/lib/kubelet/pods/ce785668-f5b3-4be6-b466-d1041d0190d1/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.365525 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5ec4933-ceca-4a4f-9206-72e01f451292" path="/var/lib/kubelet/pods/d5ec4933-ceca-4a4f-9206-72e01f451292/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.366566 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e64dac25-080f-43cf-9b56-8fca9d178614" path="/var/lib/kubelet/pods/e64dac25-080f-43cf-9b56-8fca9d178614/volumes"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.385119 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "mysql-db") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.416980 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.422908 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423026 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423083 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts\") pod \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423120 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423235 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423275 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423348 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423409 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2mwl\" (UniqueName: \"kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl\") pod \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\" (UID: \"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423445 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55xrm\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm\") pod \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\" (UID: \"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423948 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mf8zg\" (UniqueName: \"kubernetes.io/projected/32e811b4-672d-4aa2-905b-9406f594be5c-kube-api-access-mf8zg\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423967 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.423988 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.427887 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a98e0fb-025b-4a3c-8bbb-2a2f20132e38" (UID: "0a98e0fb-025b-4a3c-8bbb-2a2f20132e38"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.428384 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.428458 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.430508 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl" (OuterVolumeSpecName: "kube-api-access-z2mwl") pod "0a98e0fb-025b-4a3c-8bbb-2a2f20132e38" (UID: "0a98e0fb-025b-4a3c-8bbb-2a2f20132e38"). InnerVolumeSpecName "kube-api-access-z2mwl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.437631 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.463093 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "32e811b4-672d-4aa2-905b-9406f594be5c" (UID: "32e811b4-672d-4aa2-905b-9406f594be5c"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.485620 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm" (OuterVolumeSpecName: "kube-api-access-55xrm") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "kube-api-access-55xrm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.508099 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.524513 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.556996 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557034 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557048 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557062 4784 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e811b4-672d-4aa2-905b-9406f594be5c-galera-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557074 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557085 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557097 4784 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557109 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2mwl\" (UniqueName: \"kubernetes.io/projected/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38-kube-api-access-z2mwl\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.557126 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55xrm\" (UniqueName: \"kubernetes.io/projected/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-kube-api-access-55xrm\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.560379 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.579892 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c78a-account-create-update-zjr59" event={"ID":"de90cd55-5430-48c7-b519-a4398e495607","Type":"ContainerDied","Data":"a55ac9f5c9cd26f259fb83887e86410eaa008ee5d296ec60d38a21498620f27e"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.580006 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c78a-account-create-update-zjr59"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.608471 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c594-account-create-update-qhcs2" event={"ID":"11de2666-488c-4ccd-8d33-37e9e957a8c8","Type":"ContainerStarted","Data":"4b272acebe1adaf9541632ec59f9a50b41fdceb1018c61d25254df6b25783882"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.613365 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.621043 4784 generic.go:334] "Generic (PLEG): container finished" podID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerID="286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" exitCode=0
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.621119 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f153c14-0bd9-4c9f-a8fc-c54c80722bce","Type":"ContainerDied","Data":"286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.621149 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f153c14-0bd9-4c9f-a8fc-c54c80722bce","Type":"ContainerDied","Data":"33daef59f535476e9244293a33ebbf460367627addd402a1197c073a7d2862bc"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.621161 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33daef59f535476e9244293a33ebbf460367627addd402a1197c073a7d2862bc"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.628557 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-ddd99f6b5-9vfkd" event={"ID":"ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2","Type":"ContainerDied","Data":"4a006b1e8f0548fd4e4398d39d25d3b707e9b4348252c4a228e450fc2f8e8164"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.628631 4784 scope.go:117] "RemoveContainer" containerID="bc9bbb81f969e4ccb4a50e67c4b725fd7aaad864e06456377517ca86818bce99"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.628797 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-ddd99f6b5-9vfkd"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.640054 4784 generic.go:334] "Generic (PLEG): container finished" podID="32e811b4-672d-4aa2-905b-9406f594be5c" containerID="a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a" exitCode=0
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.640150 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerDied","Data":"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.641601 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"32e811b4-672d-4aa2-905b-9406f594be5c","Type":"ContainerDied","Data":"c751a3aefb774c2a1e0749606c08e5fa9157a8c35e18468a959564cb251f7415"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.643461 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.649149 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data" (OuterVolumeSpecName: "config-data") pod "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" (UID: "ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.654309 4784 generic.go:334] "Generic (PLEG): container finished" podID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerID="daa8748da271a4d548f8c192fbf0ea343adcd94a49510154aa63c807c38815e3" exitCode=0
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.654476 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerDied","Data":"daa8748da271a4d548f8c192fbf0ea343adcd94a49510154aa63c807c38815e3"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.659226 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.659339 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.659393 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.660123 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 06 08:38:30 crc kubenswrapper[4784]: E0106 08:38:30.660236 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data podName:052ecaa6-58fd-42ed-b2c5-6b8919470619 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:34.660217894 +0000 UTC m=+1416.706390731 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data") pod "rabbitmq-server-0" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619") : configmap "rabbitmq-config-data" not found
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.663796 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d356-account-create-update-2prdj" event={"ID":"0a98e0fb-025b-4a3c-8bbb-2a2f20132e38","Type":"ContainerDied","Data":"2acc1cdac69071e461e9e2ecd49fad8422a18ad8e39dd5342f45caa350e8fd07"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.663914 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d356-account-create-update-2prdj"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.675925 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-361f-account-create-update-94fnx" event={"ID":"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a","Type":"ContainerStarted","Data":"a9e61107fa012d8a29b318487516cf0a7ed7eb7a70070b7d6c8c93ddb4150cd7"}
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.760557 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.783672 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.784305 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-central-agent" containerID="cri-o://4ef361b972f42bea1be49ec4ca483a7dbcd0419667a1513a9db6c46533072348" gracePeriod=30
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.784567 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="proxy-httpd" containerID="cri-o://720bfe1553db1a3c166e22d70a377ca997d8451c2dc8326f20d8aeb1c6b818b6" gracePeriod=30
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.784675 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="sg-core" containerID="cri-o://7f0e5dcde65f104bcb37b1f44544e265a3b934ad96e3df20a22d72ff4674f3a0" gracePeriod=30
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.784809 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-notification-agent" containerID="cri-o://dc6835d04241d55d04ae4a35e417f32bdfac1a264f3e1266dbf5a6c0cea46b5d" gracePeriod=30
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.823205 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"]
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.842456 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c78a-account-create-update-zjr59"]
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.866155 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2vf7\" (UniqueName: \"kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7\") pod \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.866690 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle\") pod \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.866829 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data\") pod \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\" (UID: \"5f153c14-0bd9-4c9f-a8fc-c54c80722bce\") "
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.890717 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7" (OuterVolumeSpecName: "kube-api-access-m2vf7") pod "5f153c14-0bd9-4c9f-a8fc-c54c80722bce" (UID: "5f153c14-0bd9-4c9f-a8fc-c54c80722bce"). InnerVolumeSpecName "kube-api-access-m2vf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.892080 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"]
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.892106 4784 scope.go:117] "RemoveContainer" containerID="8885e8dca6c2747b87500b9c0f62e03258c8d67a971af2cba425f6a3b53371bf"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.948014 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.179:9292/healthcheck\": read tcp 10.217.0.2:57740->10.217.0.179:9292: read: connection reset by peer"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.949871 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.179:9292/healthcheck\": read tcp 10.217.0.2:57724->10.217.0.179:9292: read: connection reset by peer"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.953898 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.166:8776/healthcheck\": read tcp 10.217.0.2:35294->10.217.0.166:8776: read: connection reset by peer"
Jan 06 08:38:30 crc kubenswrapper[4784]: I0106 08:38:30.954462 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d356-account-create-update-2prdj"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.012859 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data" (OuterVolumeSpecName: "config-data") pod "5f153c14-0bd9-4c9f-a8fc-c54c80722bce" (UID: "5f153c14-0bd9-4c9f-a8fc-c54c80722bce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.056292 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.065753 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2vf7\" (UniqueName: \"kubernetes.io/projected/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-kube-api-access-m2vf7\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.056647 4784 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.065899 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts podName:f364fb9d-ca92-487a-9e6f-6d85a97117d0 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:33.065861198 +0000 UTC m=+1415.112034035 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts") pod "root-account-create-update-sx6jh" (UID: "f364fb9d-ca92-487a-9e6f-6d85a97117d0") : configmap "openstack-cell1-scripts" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.097836 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.098117 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f1733b30-f3c1-414f-9140-f42583e97d31" containerName="kube-state-metrics" containerID="cri-o://6b3f7fd4b97b2de294cdc7d27d5014fd5bf46b02dd68f76b1ad9fb9236ae1bba" gracePeriod=30
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.155411 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f153c14-0bd9-4c9f-a8fc-c54c80722bce" (UID: "5f153c14-0bd9-4c9f-a8fc-c54c80722bce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.182155 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f153c14-0bd9-4c9f-a8fc-c54c80722bce-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.201324 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.216161 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.218323 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.220033 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.228258 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.228351 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.228378 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.233882 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.238256 4784 scope.go:117] "RemoveContainer" containerID="a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.242681 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc75-account-create-update-vmhbs"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.257095 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.257525 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" containerName="memcached" containerID="cri-o://4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28" gracePeriod=30
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.264569 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dc75-account-create-update-vmhbs"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.278084 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dc75-account-create-update-l22h6"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.278817 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-server"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.278886 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-server"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.278998 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="galera"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.279048 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="galera"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.279100 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-httpd"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.279177 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-httpd"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.279298 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="mysql-bootstrap"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.279349 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="mysql-bootstrap"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.279528 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerName="nova-cell1-conductor-conductor"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.279598 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerName="nova-cell1-conductor-conductor"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.281663 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" containerName="galera"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.281748 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-httpd"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.281801 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" containerName="nova-cell1-conductor-conductor"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.281859 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" containerName="proxy-server"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.282681 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc75-account-create-update-l22h6"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.285500 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.290778 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.295197 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc75-account-create-update-l22h6"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.301474 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.301669 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.302900 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-gp9r7"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.316687 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-fhdrv"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.325350 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-fhdrv"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.332344 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-gp9r7"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.340351 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.340723 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-5c58bd8cfd-bjvgd" podUID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" containerName="keystone-api" containerID="cri-o://4223cd132ca6515fc76e29d4b62d62f23ca0125b03efe4932036e3c3b22ddecd" gracePeriod=30
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.353623 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.367594 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-c9v66"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.370618 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 06 08:38:31 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: if [ -n "nova_cell0" ]; then
Jan 06 08:38:31 crc kubenswrapper[4784]: GRANT_DATABASE="nova_cell0"
Jan 06 08:38:31 crc kubenswrapper[4784]: else
Jan 06 08:38:31 crc kubenswrapper[4784]: GRANT_DATABASE="*"
Jan 06 08:38:31 crc kubenswrapper[4784]: fi
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: # going for maximum compatibility here:
Jan 06 08:38:31 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 06 08:38:31 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 06 08:38:31 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 06 08:38:31 crc kubenswrapper[4784]: # support updates
Jan 06 08:38:31 crc kubenswrapper[4784]:
Jan 06 08:38:31 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.373283 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-7e75-account-create-update-95l2w" podUID="b3f9f481-e72f-47a3-bd2c-33ec9bb8025b"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.390061 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.396657 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrl6s\" (UniqueName: \"kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.396714 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-c9v66"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.409105 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc75-account-create-update-l22h6"]
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.414931 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-nrl6s operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-dc75-account-create-update-l22h6" podUID="ed8af5aa-332a-4950-8bbb-ab976b7a2b64"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.469528 4784 scope.go:117] "RemoveContainer" containerID="cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.484310 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-n9mzl"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.497844 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-84c65dd87b-gpr7l" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:45390->10.217.0.164:9311: read: connection reset by peer"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.498342 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-84c65dd87b-gpr7l" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:45398->10.217.0.164:9311: read: connection reset by peer"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.501084 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.501128 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrl6s\" (UniqueName: \"kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6"
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.526253 4784 projected.go:194] Error preparing data for projected volume kube-api-access-nrl6s for pod openstack/keystone-dc75-account-create-update-l22h6: failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.526620 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:32.026587662 +0000 UTC m=+1414.072760499 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-nrl6s" (UniqueName: "kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.529090 4784 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.532304 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:32.032202096 +0000 UTC m=+1414.078374943 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : configmap "openstack-scripts" not found
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.555243 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-94fnx"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.555448 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.558705 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.564705 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-ddd99f6b5-9vfkd"]
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.603731 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.603789 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.603826 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.603851 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.603880 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts\") pod \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.604022 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mggl2\" (UniqueName: \"kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2\") pod \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\" (UID: \"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.604071 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.604096 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.604121 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmlxj\" (UniqueName: \"kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.604149 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run\") pod \"fecd8c1e-482d-4469-a884-c357e0e66fe0\" (UID: \"fecd8c1e-482d-4469-a884-c357e0e66fe0\") "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.605316 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.618110 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs" (OuterVolumeSpecName: "logs") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.621166 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a" (UID: "b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.622857 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2" (OuterVolumeSpecName: "kube-api-access-mggl2") pod "b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a" (UID: "b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a"). InnerVolumeSpecName "kube-api-access-mggl2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.624760 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.653139 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts" (OuterVolumeSpecName: "scripts") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.653303 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj" (OuterVolumeSpecName: "kube-api-access-hmlxj") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "kube-api-access-hmlxj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.688208 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708776 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-logs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708808 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmlxj\" (UniqueName: \"kubernetes.io/projected/fecd8c1e-482d-4469-a884-c357e0e66fe0-kube-api-access-hmlxj\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708834 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fecd8c1e-482d-4469-a884-c357e0e66fe0-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708845 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708873 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708884 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708895 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.708905 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mggl2\" (UniqueName: \"kubernetes.io/projected/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a-kube-api-access-mggl2\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713524 4784 generic.go:334] "Generic (PLEG): container finished" podID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerID="720bfe1553db1a3c166e22d70a377ca997d8451c2dc8326f20d8aeb1c6b818b6" exitCode=0
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713581 4784 generic.go:334] "Generic (PLEG): container finished" podID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerID="7f0e5dcde65f104bcb37b1f44544e265a3b934ad96e3df20a22d72ff4674f3a0" exitCode=2
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713593 4784 generic.go:334] "Generic (PLEG): container finished" podID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerID="4ef361b972f42bea1be49ec4ca483a7dbcd0419667a1513a9db6c46533072348" exitCode=0
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713697 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerDied","Data":"720bfe1553db1a3c166e22d70a377ca997d8451c2dc8326f20d8aeb1c6b818b6"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713733 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerDied","Data":"7f0e5dcde65f104bcb37b1f44544e265a3b934ad96e3df20a22d72ff4674f3a0"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713745 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerDied","Data":"4ef361b972f42bea1be49ec4ca483a7dbcd0419667a1513a9db6c46533072348"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.713743 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" containerID="cri-o://e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" gracePeriod=30
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.727187 4784 generic.go:334] "Generic (PLEG): container finished" podID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerID="6fffca0776ab46872f3102c7fc0a42be2362bb53a3780f33a08136d43b9b8eae" exitCode=0
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.727506 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.727510 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerDied","Data":"6fffca0776ab46872f3102c7fc0a42be2362bb53a3780f33a08136d43b9b8eae"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.732574 4784 generic.go:334] "Generic (PLEG): container finished" podID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerID="952523238c0159b22bae45fd0feb984398f4e41c261b86b0227306adbbc37885" exitCode=0
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.732620 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerDied","Data":"952523238c0159b22bae45fd0feb984398f4e41c261b86b0227306adbbc37885"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.734743 4784 generic.go:334] "Generic (PLEG): container finished" podID="f1733b30-f3c1-414f-9140-f42583e97d31" containerID="6b3f7fd4b97b2de294cdc7d27d5014fd5bf46b02dd68f76b1ad9fb9236ae1bba" exitCode=2
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.734781 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f1733b30-f3c1-414f-9140-f42583e97d31","Type":"ContainerDied","Data":"6b3f7fd4b97b2de294cdc7d27d5014fd5bf46b02dd68f76b1ad9fb9236ae1bba"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.735101 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.744058 4784 generic.go:334] "Generic (PLEG): container finished" podID="e94ed326-8f56-4933-8616-5814505b58f5" containerID="54f4dd31065d6db90e2a8b67fbc54756e16347bc95f37040733d2d6e10eb17a1" exitCode=0
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.744120 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerDied","Data":"54f4dd31065d6db90e2a8b67fbc54756e16347bc95f37040733d2d6e10eb17a1"}
Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.746430 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data" (OuterVolumeSpecName: "config-data") pod "fecd8c1e-482d-4469-a884-c357e0e66fe0" (UID: "fecd8c1e-482d-4469-a884-c357e0e66fe0"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.754443 4784 generic.go:334] "Generic (PLEG): container finished" podID="6cd2b801-83a4-410f-a555-8dfda270713a" containerID="7edfdbc25e2ee69a5e5b15b59c0c29dcf7e91b34fc5883ef77bb32176ff55cd7" exitCode=0 Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.754525 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerDied","Data":"7edfdbc25e2ee69a5e5b15b59c0c29dcf7e91b34fc5883ef77bb32176ff55cd7"} Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.756668 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7e75-account-create-update-95l2w" event={"ID":"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b","Type":"ContainerStarted","Data":"93866ea0163be68c7c005b0ae457e39c238a9afe82f33c78d32ee4b095bf5033"} Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.774819 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-361f-account-create-update-94fnx" event={"ID":"b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a","Type":"ContainerDied","Data":"a9e61107fa012d8a29b318487516cf0a7ed7eb7a70070b7d6c8c93ddb4150cd7"} Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.774946 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-361f-account-create-update-94fnx" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.812759 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.812803 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.812815 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fecd8c1e-482d-4469-a884-c357e0e66fe0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.831097 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"fecd8c1e-482d-4469-a884-c357e0e66fe0","Type":"ContainerDied","Data":"35363fe16c783651f53f84efaf787460f1de140c24571253d432c8982d3aa3c6"} Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.831228 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.848059 4784 generic.go:334] "Generic (PLEG): container finished" podID="79cac775-c143-4370-bf3b-b25e2ca62120" containerID="00d276555cdbfb9bcc2cbe375f52631931dd7b8c8efe7fb96c6f9c7ad948b1ab" exitCode=0 Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.848137 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.848633 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerDied","Data":"00d276555cdbfb9bcc2cbe375f52631931dd7b8c8efe7fb96c6f9c7ad948b1ab"} Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.848846 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.867659 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.914437 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9plhn\" (UniqueName: \"kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn\") pod \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.914582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts\") pod \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\" (UID: \"f364fb9d-ca92-487a-9e6f-6d85a97117d0\") " Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.922692 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f364fb9d-ca92-487a-9e6f-6d85a97117d0" (UID: "f364fb9d-ca92-487a-9e6f-6d85a97117d0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.924053 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f364fb9d-ca92-487a-9e6f-6d85a97117d0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.924291 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-qhcs2" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.925514 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn" (OuterVolumeSpecName: "kube-api-access-9plhn") pod "f364fb9d-ca92-487a-9e6f-6d85a97117d0" (UID: "f364fb9d-ca92-487a-9e6f-6d85a97117d0"). InnerVolumeSpecName "kube-api-access-9plhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.931298 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.936020 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.947803 4784 scope.go:117] "RemoveContainer" containerID="a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a" Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.949396 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a\": container with ID starting with a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a not found: ID does not exist" containerID="a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.949506 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a"} err="failed to get container status \"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a\": rpc error: code = NotFound desc = could not find container \"a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a\": container with ID starting with a04341a51a59aa6ffcaa076aa65515a7b5edcbbb44d3b167585185546814e56a not found: ID does not exist" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.949629 4784 scope.go:117] "RemoveContainer" containerID="cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139" Jan 06 08:38:31 crc kubenswrapper[4784]: E0106 08:38:31.959790 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139\": container with ID starting with cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139 not found: ID does not exist" containerID="cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.959841 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139"} err="failed to get container status \"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139\": rpc error: code = NotFound desc = could not find container \"cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139\": container with ID starting with cc2cd38ef6247570e98675603c9844d91f21cf012397d97638c919f649c76139 not found: ID does not exist" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.959873 4784 scope.go:117] "RemoveContainer" containerID="daa8748da271a4d548f8c192fbf0ea343adcd94a49510154aa63c807c38815e3" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.977701 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.980280 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.984688 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.992654 4784 scope.go:117] "RemoveContainer" containerID="c7a53b94cb251c8ef3e62bbecc07389e6162337f8fbd7425b6a2aa4930128cb4" Jan 06 08:38:31 crc kubenswrapper[4784]: I0106 08:38:31.994903 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.024071 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029584 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029656 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts\") pod \"11de2666-488c-4ccd-8d33-37e9e957a8c8\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029694 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029760 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029836 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029892 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029917 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029944 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029975 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.029996 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030019 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mzls\" (UniqueName: \"kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls\") pod \"11de2666-488c-4ccd-8d33-37e9e957a8c8\" (UID: \"11de2666-488c-4ccd-8d33-37e9e957a8c8\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030072 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030089 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030108 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030142 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vrbv\" (UniqueName: \"kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030183 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run\") pod \"e94ed326-8f56-4933-8616-5814505b58f5\" (UID: \"e94ed326-8f56-4933-8616-5814505b58f5\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030206 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm6qc\" (UniqueName: \"kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc\") pod \"6cd2b801-83a4-410f-a555-8dfda270713a\" (UID: \"6cd2b801-83a4-410f-a555-8dfda270713a\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030509 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrl6s\" (UniqueName: \"kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.030710 4784 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-9plhn\" (UniqueName: \"kubernetes.io/projected/f364fb9d-ca92-487a-9e6f-6d85a97117d0-kube-api-access-9plhn\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.036459 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.036683 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-361f-account-create-update-94fnx"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.036856 4784 projected.go:194] Error preparing data for projected volume kube-api-access-nrl6s for pod openstack/keystone-dc75-account-create-update-l22h6: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.036953 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:33.036927427 +0000 UTC m=+1415.083100264 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-nrl6s" (UniqueName: "kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.038235 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs" (OuterVolumeSpecName: "logs") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.038314 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.053331 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11de2666-488c-4ccd-8d33-37e9e957a8c8" (UID: "11de2666-488c-4ccd-8d33-37e9e957a8c8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.055375 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs" (OuterVolumeSpecName: "logs") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.057213 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls" (OuterVolumeSpecName: "kube-api-access-9mzls") pod "11de2666-488c-4ccd-8d33-37e9e957a8c8" (UID: "11de2666-488c-4ccd-8d33-37e9e957a8c8"). InnerVolumeSpecName "kube-api-access-9mzls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.082127 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.083768 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv" (OuterVolumeSpecName: "kube-api-access-5vrbv") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "kube-api-access-5vrbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.090530 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.092505 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts" (OuterVolumeSpecName: "scripts") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.108076 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc" (OuterVolumeSpecName: "kube-api-access-hm6qc") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "kube-api-access-hm6qc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.113850 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts" (OuterVolumeSpecName: "scripts") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: W0106 08:38:32.129964 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34efb561_da62_425b_bb70_115757e6d00d.slice/crio-06baa117f43cbceec48e98840c75f0fb4bf7aeee4cdbe942d91de61b792afb1e WatchSource:0}: Error finding container 06baa117f43cbceec48e98840c75f0fb4bf7aeee4cdbe942d91de61b792afb1e: Status 404 returned error can't find the container with id 06baa117f43cbceec48e98840c75f0fb4bf7aeee4cdbe942d91de61b792afb1e Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.131180 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.132906 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52bvq\" (UniqueName: \"kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.132970 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133035 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle\") pod \"f1733b30-f3c1-414f-9140-f42583e97d31\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133116 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133264 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133313 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133354 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config\") pod \"f1733b30-f3c1-414f-9140-f42583e97d31\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133431 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs\") pod \"f1733b30-f3c1-414f-9140-f42583e97d31\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.133490 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.134054 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs\") pod 
\"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.134408 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dg595\" (UniqueName: \"kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595\") pod \"f1733b30-f3c1-414f-9140-f42583e97d31\" (UID: \"f1733b30-f3c1-414f-9140-f42583e97d31\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.134484 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.134582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle\") pod \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\" (UID: \"15cd1678-570e-47b5-bcb0-6745b8aa95cb\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.135863 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136049 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136070 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm6qc\" (UniqueName: \"kubernetes.io/projected/6cd2b801-83a4-410f-a555-8dfda270713a-kube-api-access-hm6qc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136084 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11de2666-488c-4ccd-8d33-37e9e957a8c8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136099 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136110 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cd2b801-83a4-410f-a555-8dfda270713a-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136121 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136150 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136454 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/e94ed326-8f56-4933-8616-5814505b58f5-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136475 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mzls\" (UniqueName: \"kubernetes.io/projected/11de2666-488c-4ccd-8d33-37e9e957a8c8-kube-api-access-9mzls\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.136493 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vrbv\" (UniqueName: \"kubernetes.io/projected/e94ed326-8f56-4933-8616-5814505b58f5-kube-api-access-5vrbv\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.152459 4784 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.152566 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:33.152517186 +0000 UTC m=+1415.198690023 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : configmap "openstack-scripts" not found Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.152778 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs" (OuterVolumeSpecName: "logs") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.152980 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.155031 4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 08:38:32 crc kubenswrapper[4784]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: if [ -n "" ]; then Jan 06 08:38:32 crc kubenswrapper[4784]: GRANT_DATABASE="" Jan 06 08:38:32 crc kubenswrapper[4784]: else Jan 06 08:38:32 crc kubenswrapper[4784]: GRANT_DATABASE="*" Jan 06 08:38:32 crc kubenswrapper[4784]: fi Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: # going for maximum compatibility here: Jan 06 08:38:32 crc kubenswrapper[4784]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 06 08:38:32 crc kubenswrapper[4784]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 06 08:38:32 crc kubenswrapper[4784]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 06 08:38:32 crc kubenswrapper[4784]: # support updates Jan 06 08:38:32 crc kubenswrapper[4784]: Jan 06 08:38:32 crc kubenswrapper[4784]: $MYSQL_CMD < logger="UnhandledError" Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.156574 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-n9mzl" podUID="34efb561-da62-425b-bb70-115757e6d00d" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.172374 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595" (OuterVolumeSpecName: "kube-api-access-dg595") pod "f1733b30-f3c1-414f-9140-f42583e97d31" (UID: "f1733b30-f3c1-414f-9140-f42583e97d31"). InnerVolumeSpecName "kube-api-access-dg595". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.185435 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts" (OuterVolumeSpecName: "scripts") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.186104 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.187027 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.188197 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq" (OuterVolumeSpecName: "kube-api-access-52bvq") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "kube-api-access-52bvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.188248 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data" (OuterVolumeSpecName: "config-data") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.227408 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.236427 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-n9mzl"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240067 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52bvq\" (UniqueName: \"kubernetes.io/projected/15cd1678-570e-47b5-bcb0-6745b8aa95cb-kube-api-access-52bvq\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240090 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15cd1678-570e-47b5-bcb0-6745b8aa95cb-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240101 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240111 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240124 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240134 4784 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15cd1678-570e-47b5-bcb0-6745b8aa95cb-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240143 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240152 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.240166 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dg595\" (UniqueName: \"kubernetes.io/projected/f1733b30-f3c1-414f-9140-f42583e97d31-kube-api-access-dg595\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.249897 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.261142 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.338664 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e94ed326-8f56-4933-8616-5814505b58f5" (UID: "e94ed326-8f56-4933-8616-5814505b58f5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.341683 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs\") pod \"79cac775-c143-4370-bf3b-b25e2ca62120\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.341768 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data\") pod \"79cac775-c143-4370-bf3b-b25e2ca62120\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.342002 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs\") pod \"79cac775-c143-4370-bf3b-b25e2ca62120\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.342025 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4f2v\" (UniqueName: \"kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v\") pod \"79cac775-c143-4370-bf3b-b25e2ca62120\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.342088 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle\") pod \"79cac775-c143-4370-bf3b-b25e2ca62120\" (UID: \"79cac775-c143-4370-bf3b-b25e2ca62120\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 
08:38:32.342527 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.342561 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94ed326-8f56-4933-8616-5814505b58f5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.343612 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs" (OuterVolumeSpecName: "logs") pod "79cac775-c143-4370-bf3b-b25e2ca62120" (UID: "79cac775-c143-4370-bf3b-b25e2ca62120"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.348652 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.357226 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a98e0fb-025b-4a3c-8bbb-2a2f20132e38" path="/var/lib/kubelet/pods/0a98e0fb-025b-4a3c-8bbb-2a2f20132e38/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.357990 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32e811b4-672d-4aa2-905b-9406f594be5c" path="/var/lib/kubelet/pods/32e811b4-672d-4aa2-905b-9406f594be5c/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.358626 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3" path="/var/lib/kubelet/pods/5ec97fe8-08d9-4c08-8f59-7f2ee13de3b3/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.359816 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f153c14-0bd9-4c9f-a8fc-c54c80722bce" path="/var/lib/kubelet/pods/5f153c14-0bd9-4c9f-a8fc-c54c80722bce/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.360488 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83a1fa13-fedb-4baf-bd94-7ce99bbeefab" path="/var/lib/kubelet/pods/83a1fa13-fedb-4baf-bd94-7ce99bbeefab/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.361143 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98272c8d-18ec-4660-9be3-ea08362a5b57" path="/var/lib/kubelet/pods/98272c8d-18ec-4660-9be3-ea08362a5b57/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.362278 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a" path="/var/lib/kubelet/pods/b8535d9e-d6b7-4c2b-bb51-acfc804a5f0a/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.362725 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de90cd55-5430-48c7-b519-a4398e495607" path="/var/lib/kubelet/pods/de90cd55-5430-48c7-b519-a4398e495607/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.363238 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0e26028-4d3e-493a-859e-2dd0149d7174" 
path="/var/lib/kubelet/pods/e0e26028-4d3e-493a-859e-2dd0149d7174/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.363471 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data" (OuterVolumeSpecName: "config-data") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.365254 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2" path="/var/lib/kubelet/pods/ee49843c-6b8d-4c1c-8b7e-8fd8abc163e2/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.367062 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" path="/var/lib/kubelet/pods/fecd8c1e-482d-4469-a884-c357e0e66fe0/volumes" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.371426 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data" (OuterVolumeSpecName: "config-data") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.372627 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v" (OuterVolumeSpecName: "kube-api-access-v4f2v") pod "79cac775-c143-4370-bf3b-b25e2ca62120" (UID: "79cac775-c143-4370-bf3b-b25e2ca62120"). InnerVolumeSpecName "kube-api-access-v4f2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.375120 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1733b30-f3c1-414f-9140-f42583e97d31" (UID: "f1733b30-f3c1-414f-9140-f42583e97d31"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.394985 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.402579 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.407255 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.407306 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" containerName="nova-cell0-conductor-conductor" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.413233 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "f1733b30-f3c1-414f-9140-f42583e97d31" (UID: "f1733b30-f3c1-414f-9140-f42583e97d31"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.415003 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.426523 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79cac775-c143-4370-bf3b-b25e2ca62120" (UID: "79cac775-c143-4370-bf3b-b25e2ca62120"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.439528 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data" (OuterVolumeSpecName: "config-data") pod "79cac775-c143-4370-bf3b-b25e2ca62120" (UID: "79cac775-c143-4370-bf3b-b25e2ca62120"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448377 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448630 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448642 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448653 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448666 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79cac775-c143-4370-bf3b-b25e2ca62120-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448674 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4f2v\" (UniqueName: \"kubernetes.io/projected/79cac775-c143-4370-bf3b-b25e2ca62120-kube-api-access-v4f2v\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448683 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448693 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448704 4784 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.448713 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.454528 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15cd1678-570e-47b5-bcb0-6745b8aa95cb" (UID: "15cd1678-570e-47b5-bcb0-6745b8aa95cb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.486426 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "f1733b30-f3c1-414f-9140-f42583e97d31" (UID: "f1733b30-f3c1-414f-9140-f42583e97d31"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.490533 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "79cac775-c143-4370-bf3b-b25e2ca62120" (UID: "79cac775-c143-4370-bf3b-b25e2ca62120"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.496203 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.516787 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6cd2b801-83a4-410f-a555-8dfda270713a" (UID: "6cd2b801-83a4-410f-a555-8dfda270713a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.555211 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.555252 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15cd1678-570e-47b5-bcb0-6745b8aa95cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.555262 4784 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1733b30-f3c1-414f-9140-f42583e97d31-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.555275 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cd2b801-83a4-410f-a555-8dfda270713a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.555284 4784 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/79cac775-c143-4370-bf3b-b25e2ca62120-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.642073 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.716366 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-95l2w" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.761323 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts\") pod \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.761504 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pttk\" (UniqueName: \"kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk\") pod \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\" (UID: \"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.764565 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b3f9f481-e72f-47a3-bd2c-33ec9bb8025b" (UID: "b3f9f481-e72f-47a3-bd2c-33ec9bb8025b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.773670 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk" (OuterVolumeSpecName: "kube-api-access-7pttk") pod "b3f9f481-e72f-47a3-bd2c-33ec9bb8025b" (UID: "b3f9f481-e72f-47a3-bd2c-33ec9bb8025b"). InnerVolumeSpecName "kube-api-access-7pttk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.824044 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.848501 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.854274 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.860653 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 06 08:38:32 crc kubenswrapper[4784]: E0106 08:38:32.860740 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.865089 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pttk\" (UniqueName: \"kubernetes.io/projected/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-kube-api-access-7pttk\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.865126 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.877054 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5759d5cbc4-2r87d" event={"ID":"6cd2b801-83a4-410f-a555-8dfda270713a","Type":"ContainerDied","Data":"9eac8dbb220c2ed526d704e524ff84da3f345205b87705c844159d8cfa7b7a7b"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.877149 4784 scope.go:117] "RemoveContainer" containerID="7edfdbc25e2ee69a5e5b15b59c0c29dcf7e91b34fc5883ef77bb32176ff55cd7" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.877326 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5759d5cbc4-2r87d" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.902535 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.902760 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f1733b30-f3c1-414f-9140-f42583e97d31","Type":"ContainerDied","Data":"7c53dd49a7ff8ddf0cbc34e0235ddc869165070d048f89a8bb5c939a53991212"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.903041 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.905213 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c594-account-create-update-qhcs2" event={"ID":"11de2666-488c-4ccd-8d33-37e9e957a8c8","Type":"ContainerDied","Data":"4b272acebe1adaf9541632ec59f9a50b41fdceb1018c61d25254df6b25783882"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.906417 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c594-account-create-update-qhcs2" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.922018 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e94ed326-8f56-4933-8616-5814505b58f5","Type":"ContainerDied","Data":"82ac278234fa68eef222653ae3d0d395045b228175b6773e69a06dcbb04b7af7"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.922189 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.932923 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-sx6jh" event={"ID":"f364fb9d-ca92-487a-9e6f-6d85a97117d0","Type":"ContainerDied","Data":"f36fb7f267e93c7667782d8010e59e423efd806358dba3656169e514e93d72a2"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.933182 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-sx6jh" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.934605 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.935575 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.936463 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"15cd1678-570e-47b5-bcb0-6745b8aa95cb","Type":"ContainerDied","Data":"308fa13267e4be01707682d19351f9c98d583de145ec312e6ae995eb800699ba"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.936554 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.951187 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4dd5733d-6502-4030-a012-be296b7d11c1","Type":"ContainerDied","Data":"c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.950413 4784 generic.go:334] "Generic (PLEG): container finished" podID="4dd5733d-6502-4030-a012-be296b7d11c1" containerID="c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" exitCode=0 Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.954060 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7e75-account-create-update-95l2w" event={"ID":"b3f9f481-e72f-47a3-bd2c-33ec9bb8025b","Type":"ContainerDied","Data":"93866ea0163be68c7c005b0ae457e39c238a9afe82f33c78d32ee4b095bf5033"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.954171 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7e75-account-create-update-95l2w" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.960030 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n9mzl" event={"ID":"34efb561-da62-425b-bb70-115757e6d00d","Type":"ContainerStarted","Data":"06baa117f43cbceec48e98840c75f0fb4bf7aeee4cdbe942d91de61b792afb1e"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966260 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg7n5\" (UniqueName: \"kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966336 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966380 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966452 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966509 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqlk5\" (UniqueName: \"kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966588 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" 
(UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966665 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966700 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966742 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966791 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966834 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle\") pod \"19f8ed37-5996-433b-9915-97489c1d8f11\" (UID: \"19f8ed37-5996-433b-9915-97489c1d8f11\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966869 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.966902 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom\") pod \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\" (UID: \"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1\") " Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.967714 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.968583 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5759d5cbc4-2r87d"] Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.968623 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"79cac775-c143-4370-bf3b-b25e2ca62120","Type":"ContainerDied","Data":"ee67f7bb0e1c5ca073830d65cf12e2aa98102c9c26ff9beebb0b46752ce0beac"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.969443 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs" (OuterVolumeSpecName: "logs") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.969524 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs" (OuterVolumeSpecName: "logs") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.972190 4784 scope.go:117] "RemoveContainer" containerID="07f60e1630bdcd2399c45169de65854f62b21633e41ebfe502b28d51d39226cc" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.983375 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5" (OuterVolumeSpecName: "kube-api-access-kg7n5") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "kube-api-access-kg7n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.994593 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.994938 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84c65dd87b-gpr7l" event={"ID":"db32cfd9-0522-4dc5-b8fd-0cb61d08efd1","Type":"ContainerDied","Data":"88b494cb7dd981f0cd85a374cc0ff0b0daec7659dd58f2b8d3b6341cb598c581"} Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.995053 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84c65dd87b-gpr7l" Jan 06 08:38:32 crc kubenswrapper[4784]: I0106 08:38:32.996395 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.016568 4784 generic.go:334] "Generic (PLEG): container finished" podID="19f8ed37-5996-433b-9915-97489c1d8f11" containerID="7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217" exitCode=0 Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.016652 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerDied","Data":"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217"} Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.016684 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"19f8ed37-5996-433b-9915-97489c1d8f11","Type":"ContainerDied","Data":"30be62fa6f5e59d59e40f41f49aacc0bb8277eb581cb78bfa26519f607f5bba7"} Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.016762 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.019361 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5" (OuterVolumeSpecName: "kube-api-access-jqlk5") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "kube-api-access-jqlk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.031276 4784 scope.go:117] "RemoveContainer" containerID="6b3f7fd4b97b2de294cdc7d27d5014fd5bf46b02dd68f76b1ad9fb9236ae1bba" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.031828 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.039350 4784 generic.go:334] "Generic (PLEG): container finished" podID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerID="3dbabc5c6ed9e67890a74683afbfe123a07fdb5e9564f6fd5fe7f8edcd519e4c" exitCode=0 Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.039441 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-849db5db7c-vjb4f" event={"ID":"bed6a7b9-0069-4ea7-b813-70a5808d18db","Type":"ContainerDied","Data":"3dbabc5c6ed9e67890a74683afbfe123a07fdb5e9564f6fd5fe7f8edcd519e4c"} Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.039621 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.039686 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-849db5db7c-vjb4f" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.049606 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.066362 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-sx6jh"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068327 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068433 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068453 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068474 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068509 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068603 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmdjs\" (UniqueName: \"kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.068696 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config\") pod \"bed6a7b9-0069-4ea7-b813-70a5808d18db\" (UID: \"bed6a7b9-0069-4ea7-b813-70a5808d18db\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074293 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrl6s\" (UniqueName: \"kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074730 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data-custom\") on node 
\"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074749 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg7n5\" (UniqueName: \"kubernetes.io/projected/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-kube-api-access-kg7n5\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074765 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqlk5\" (UniqueName: \"kubernetes.io/projected/19f8ed37-5996-433b-9915-97489c1d8f11-kube-api-access-jqlk5\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074779 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074793 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19f8ed37-5996-433b-9915-97489c1d8f11-logs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.074804 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.077769 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data" (OuterVolumeSpecName: "config-data") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.087489 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.092348 4784 projected.go:194] Error preparing data for projected volume kube-api-access-nrl6s for pod openstack/keystone-dc75-account-create-update-l22h6: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.092434 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:35.092407457 +0000 UTC m=+1417.138580294 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nrl6s" (UniqueName: "kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.096056 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs" (OuterVolumeSpecName: "kube-api-access-rmdjs") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "kube-api-access-rmdjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.104317 4784 scope.go:117] "RemoveContainer" containerID="54f4dd31065d6db90e2a8b67fbc54756e16347bc95f37040733d2d6e10eb17a1" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.106397 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.119110 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.123613 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.143709 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.152157 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data" (OuterVolumeSpecName: "config-data") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.153695 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.158698 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.163971 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177419 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts\") pod \"keystone-dc75-account-create-update-l22h6\" (UID: \"ed8af5aa-332a-4950-8bbb-ab976b7a2b64\") " pod="openstack/keystone-dc75-account-create-update-l22h6" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177571 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177584 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmdjs\" (UniqueName: \"kubernetes.io/projected/bed6a7b9-0069-4ea7-b813-70a5808d18db-kube-api-access-rmdjs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177643 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177658 4784 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.177691 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.177743 4784 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.177830 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts podName:ed8af5aa-332a-4950-8bbb-ab976b7a2b64 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:35.177804867 +0000 UTC m=+1417.223977694 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts") pod "keystone-dc75-account-create-update-l22h6" (UID: "ed8af5aa-332a-4950-8bbb-ab976b7a2b64") : configmap "openstack-scripts" not found Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.178223 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.180190 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.180949 4784 scope.go:117] "RemoveContainer" containerID="538fb0d95d0196cf8efa1743e88a29f2ea31f008245abb93870ac9e8829ca9a8" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.180971 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "19f8ed37-5996-433b-9915-97489c1d8f11" (UID: "19f8ed37-5996-433b-9915-97489c1d8f11"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.188284 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-c594-account-create-update-qhcs2"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.190289 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.219244 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.221664 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.225752 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.230526 4784 scope.go:117] "RemoveContainer" containerID="952523238c0159b22bae45fd0feb984398f4e41c261b86b0227306adbbc37885" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.231412 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.260782 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.263634 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.272344 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7e75-account-create-update-95l2w"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279164 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279196 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279209 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279221 4784 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279232 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.279243 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19f8ed37-5996-433b-9915-97489c1d8f11-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.283907 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" (UID: "db32cfd9-0522-4dc5-b8fd-0cb61d08efd1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.288935 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config" (OuterVolumeSpecName: "config") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.315628 4784 scope.go:117] "RemoveContainer" containerID="9f1c3888df0343f9b1d8b44706fe2ef8ae987e71456454a609fae003c73320e1" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.345911 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc75-account-create-update-l22h6"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.371579 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bed6a7b9-0069-4ea7-b813-70a5808d18db" (UID: "bed6a7b9-0069-4ea7-b813-70a5808d18db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.376110 4784 scope.go:117] "RemoveContainer" containerID="00d276555cdbfb9bcc2cbe375f52631931dd7b8c8efe7fb96c6f9c7ad948b1ab" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.387497 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dc75-account-create-update-l22h6"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.391345 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.391380 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed6a7b9-0069-4ea7-b813-70a5808d18db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.391401 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.397491 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84c65dd87b-gpr7l"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.409369 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-84c65dd87b-gpr7l"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.418830 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.427499 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.439789 4784 scope.go:117] "RemoveContainer" containerID="fd90bc399e2a254f94bbf15631cc9bd41d01a3e85ba8564aafc267bb679d9db4" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.447858 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.462598 4784 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-867cd545c7-xt7gs" podUID="8e67aeba-582a-470f-a40f-e1def33f01d2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.198:5353: i/o timeout" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.492591 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data\") pod \"4dd5733d-6502-4030-a012-be296b7d11c1\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.492982 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle\") pod \"4dd5733d-6502-4030-a012-be296b7d11c1\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.493756 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w592f\" (UniqueName: \"kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f\") pod \"4dd5733d-6502-4030-a012-be296b7d11c1\" (UID: \"4dd5733d-6502-4030-a012-be296b7d11c1\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.494393 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrl6s\" (UniqueName: \"kubernetes.io/projected/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-kube-api-access-nrl6s\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.494410 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed8af5aa-332a-4950-8bbb-ab976b7a2b64-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.499570 4784 scope.go:117] "RemoveContainer" containerID="6fffca0776ab46872f3102c7fc0a42be2362bb53a3780f33a08136d43b9b8eae" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.500509 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f" (OuterVolumeSpecName: "kube-api-access-w592f") pod "4dd5733d-6502-4030-a012-be296b7d11c1" (UID: "4dd5733d-6502-4030-a012-be296b7d11c1"). InnerVolumeSpecName "kube-api-access-w592f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.536615 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data" (OuterVolumeSpecName: "config-data") pod "4dd5733d-6502-4030-a012-be296b7d11c1" (UID: "4dd5733d-6502-4030-a012-be296b7d11c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.577059 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4dd5733d-6502-4030-a012-be296b7d11c1" (UID: "4dd5733d-6502-4030-a012-be296b7d11c1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.597216 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.597379 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data podName:41c89df0-d35f-4f47-86f3-71a2c0971d79 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:41.597356384 +0000 UTC m=+1423.643529221 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data") pod "rabbitmq-cell1-server-0" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79") : configmap "rabbitmq-cell1-config-data" not found Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.597217 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.597536 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dd5733d-6502-4030-a012-be296b7d11c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.597565 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w592f\" (UniqueName: \"kubernetes.io/projected/4dd5733d-6502-4030-a012-be296b7d11c1-kube-api-access-w592f\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.694286 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.709124 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.709692 4784 scope.go:117] "RemoveContainer" containerID="fb3044be153df30f3ff3fd00d5cb70d02783bb32a511ab902bb2119d0727fe42" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.712811 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.719183 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-849db5db7c-vjb4f"] Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.736145 4784 scope.go:117] "RemoveContainer" containerID="7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801255 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data\") pod \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801376 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs\") pod \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801445 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85mml\" (UniqueName: \"kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml\") pod \"34efb561-da62-425b-bb70-115757e6d00d\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801587 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle\") pod \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801638 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts\") pod \"34efb561-da62-425b-bb70-115757e6d00d\" (UID: \"34efb561-da62-425b-bb70-115757e6d00d\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801687 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgc98\" (UniqueName: \"kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98\") pod \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.801773 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config\") pod \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\" (UID: \"3627acbf-1c12-4e8e-97f0-e44a6cd124c3\") " Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.802340 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data" (OuterVolumeSpecName: "config-data") pod "3627acbf-1c12-4e8e-97f0-e44a6cd124c3" (UID: 
"3627acbf-1c12-4e8e-97f0-e44a6cd124c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.802658 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.803196 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "3627acbf-1c12-4e8e-97f0-e44a6cd124c3" (UID: "3627acbf-1c12-4e8e-97f0-e44a6cd124c3"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.804150 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "34efb561-da62-425b-bb70-115757e6d00d" (UID: "34efb561-da62-425b-bb70-115757e6d00d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.820439 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98" (OuterVolumeSpecName: "kube-api-access-tgc98") pod "3627acbf-1c12-4e8e-97f0-e44a6cd124c3" (UID: "3627acbf-1c12-4e8e-97f0-e44a6cd124c3"). InnerVolumeSpecName "kube-api-access-tgc98". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.823891 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml" (OuterVolumeSpecName: "kube-api-access-85mml") pod "34efb561-da62-425b-bb70-115757e6d00d" (UID: "34efb561-da62-425b-bb70-115757e6d00d"). InnerVolumeSpecName "kube-api-access-85mml". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.834666 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3627acbf-1c12-4e8e-97f0-e44a6cd124c3" (UID: "3627acbf-1c12-4e8e-97f0-e44a6cd124c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.855444 4784 scope.go:117] "RemoveContainer" containerID="501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.886297 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "3627acbf-1c12-4e8e-97f0-e44a6cd124c3" (UID: "3627acbf-1c12-4e8e-97f0-e44a6cd124c3"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.897780 4784 scope.go:117] "RemoveContainer" containerID="7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217" Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.898485 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217\": container with ID starting with 7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217 not found: ID does not exist" containerID="7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.898559 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217"} err="failed to get container status \"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217\": rpc error: code = NotFound desc = could not find container \"7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217\": container with ID starting with 7947cb94b8d88ad4df644dc547771879ad687dece99ce064512f4521ede7a217 not found: ID does not exist" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.898600 4784 scope.go:117] "RemoveContainer" containerID="501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da" Jan 06 08:38:33 crc kubenswrapper[4784]: E0106 08:38:33.899173 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da\": container with ID starting with 501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da not found: ID does not exist" containerID="501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.899236 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da"} err="failed to get container status \"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da\": rpc error: code = NotFound desc = could not find container \"501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da\": container with ID starting with 501fd29e0c03aad226668f3a9429c1898d430f716269513ca50915d1f53a01da not found: ID does not exist" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.899276 4784 scope.go:117] "RemoveContainer" containerID="b771c84b2fee4dfd30eea462159329d4aa16e7a25274eb645e61e328c8f50840" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.903488 4784 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.903516 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85mml\" (UniqueName: \"kubernetes.io/projected/34efb561-da62-425b-bb70-115757e6d00d-kube-api-access-85mml\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.903525 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 
crc kubenswrapper[4784]: I0106 08:38:33.903534 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34efb561-da62-425b-bb70-115757e6d00d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.903558 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgc98\" (UniqueName: \"kubernetes.io/projected/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kube-api-access-tgc98\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.903567 4784 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3627acbf-1c12-4e8e-97f0-e44a6cd124c3-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:33 crc kubenswrapper[4784]: I0106 08:38:33.937031 4784 scope.go:117] "RemoveContainer" containerID="3dbabc5c6ed9e67890a74683afbfe123a07fdb5e9564f6fd5fe7f8edcd519e4c" Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.034845 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c98eb91_7877_4dd7_b694_52b017726242.slice/crio-conmon-b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c.scope\": RecentStats: unable to find data in memory cache]" Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.075030 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.081754 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.082810 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerID="e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" exitCode=0 Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.082934 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerDied","Data":"e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa"} Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.084563 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.084612 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d1e87443-2d75-4063-934c-dc593d03987c" containerName="nova-scheduler-scheduler" Jan 06 08:38:34 crc 
kubenswrapper[4784]: I0106 08:38:34.103156 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4dd5733d-6502-4030-a012-be296b7d11c1","Type":"ContainerDied","Data":"26ae5fd9dd796b58a427fbb8e745aaa9de4b2972ab006a63b81014975f2904d4"} Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.103235 4784 scope.go:117] "RemoveContainer" containerID="c2c0ff5bb0a05540e0092ee7d0986a984f68d0ed56bed0238c7fd744e6c37ee5" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.104159 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.112294 4784 generic.go:334] "Generic (PLEG): container finished" podID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" containerID="4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28" exitCode=0 Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.112358 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.112395 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3627acbf-1c12-4e8e-97f0-e44a6cd124c3","Type":"ContainerDied","Data":"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28"} Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.112444 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3627acbf-1c12-4e8e-97f0-e44a6cd124c3","Type":"ContainerDied","Data":"d297b8b13f5f10e37279f9ad278dd1765f896054c3c6c4059cfdcd26280764b0"} Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.131037 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n9mzl" event={"ID":"34efb561-da62-425b-bb70-115757e6d00d","Type":"ContainerDied","Data":"06baa117f43cbceec48e98840c75f0fb4bf7aeee4cdbe942d91de61b792afb1e"} Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.131134 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-n9mzl" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.152341 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.167026 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.172089 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa is running failed: container process not found" containerID="e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.172865 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa is running failed: container process not found" containerID="e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.173604 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa is running failed: container process not found" containerID="e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.173658 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.174418 4784 scope.go:117] "RemoveContainer" containerID="4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.184143 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.196944 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.213052 4784 scope.go:117] "RemoveContainer" containerID="4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28" Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.213866 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28\": container with ID starting with 4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28 not found: ID does not exist" containerID="4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.213929 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28"} err="failed to get container status \"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28\": rpc error: code = NotFound desc = could not find container \"4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28\": container with ID starting with 4287d43d16c939ca0cffadb1469454f8c0eafb31a4cd129fbdd4830a7590fe28 not found: ID does not exist" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.234647 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-n9mzl"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.246043 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-n9mzl"] Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.322876 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11de2666-488c-4ccd-8d33-37e9e957a8c8" path="/var/lib/kubelet/pods/11de2666-488c-4ccd-8d33-37e9e957a8c8/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.323448 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" path="/var/lib/kubelet/pods/15cd1678-570e-47b5-bcb0-6745b8aa95cb/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.324631 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" path="/var/lib/kubelet/pods/19f8ed37-5996-433b-9915-97489c1d8f11/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.326353 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34efb561-da62-425b-bb70-115757e6d00d" path="/var/lib/kubelet/pods/34efb561-da62-425b-bb70-115757e6d00d/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.326961 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" path="/var/lib/kubelet/pods/3627acbf-1c12-4e8e-97f0-e44a6cd124c3/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.327617 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" path="/var/lib/kubelet/pods/4dd5733d-6502-4030-a012-be296b7d11c1/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.328314 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" path="/var/lib/kubelet/pods/6cd2b801-83a4-410f-a555-8dfda270713a/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.329681 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" path="/var/lib/kubelet/pods/79cac775-c143-4370-bf3b-b25e2ca62120/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.330396 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3f9f481-e72f-47a3-bd2c-33ec9bb8025b" path="/var/lib/kubelet/pods/b3f9f481-e72f-47a3-bd2c-33ec9bb8025b/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.330852 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" path="/var/lib/kubelet/pods/bed6a7b9-0069-4ea7-b813-70a5808d18db/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.332457 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" path="/var/lib/kubelet/pods/db32cfd9-0522-4dc5-b8fd-0cb61d08efd1/volumes" Jan 
06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.333307 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e94ed326-8f56-4933-8616-5814505b58f5" path="/var/lib/kubelet/pods/e94ed326-8f56-4933-8616-5814505b58f5/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.334500 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed8af5aa-332a-4950-8bbb-ab976b7a2b64" path="/var/lib/kubelet/pods/ed8af5aa-332a-4950-8bbb-ab976b7a2b64/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.335970 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1733b30-f3c1-414f-9140-f42583e97d31" path="/var/lib/kubelet/pods/f1733b30-f3c1-414f-9140-f42583e97d31/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.336662 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f364fb9d-ca92-487a-9e6f-6d85a97117d0" path="/var/lib/kubelet/pods/f364fb9d-ca92-487a-9e6f-6d85a97117d0/volumes" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.583803 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1c98eb91-7877-4dd7-b694-52b017726242/ovn-northd/0.log" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.583894 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.624947 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.625100 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.625201 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whc8k\" (UniqueName: \"kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.625295 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.625439 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.625506 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: 
I0106 08:38:34.625608 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir\") pod \"1c98eb91-7877-4dd7-b694-52b017726242\" (UID: \"1c98eb91-7877-4dd7-b694-52b017726242\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.626304 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config" (OuterVolumeSpecName: "config") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.626842 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.627305 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts" (OuterVolumeSpecName: "scripts") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.648795 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k" (OuterVolumeSpecName: "kube-api-access-whc8k") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "kube-api-access-whc8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.654233 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.671187 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.725415 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727280 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kj57t\" (UniqueName: \"kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727399 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727430 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727498 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727558 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727631 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727706 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.727785 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config\") pod \"b6a277ac-73de-4e2b-b39f-73d467b2222c\" (UID: \"b6a277ac-73de-4e2b-b39f-73d467b2222c\") " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728293 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728330 4784 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728343 4784 reconciler_common.go:293] "Volume detached for volume 
\"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1c98eb91-7877-4dd7-b694-52b017726242-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728354 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c98eb91-7877-4dd7-b694-52b017726242-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728364 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.728373 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whc8k\" (UniqueName: \"kubernetes.io/projected/1c98eb91-7877-4dd7-b694-52b017726242-kube-api-access-whc8k\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.728479 4784 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 06 08:38:34 crc kubenswrapper[4784]: E0106 08:38:34.728566 4784 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data podName:052ecaa6-58fd-42ed-b2c5-6b8919470619 nodeName:}" failed. No retries permitted until 2026-01-06 08:38:42.728523674 +0000 UTC m=+1424.774696511 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data") pod "rabbitmq-server-0" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619") : configmap "rabbitmq-config-data" not found Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.731978 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.732442 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.732660 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.733368 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.733587 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t" (OuterVolumeSpecName: "kube-api-access-kj57t") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "kube-api-access-kj57t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.738630 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1c98eb91-7877-4dd7-b694-52b017726242" (UID: "1c98eb91-7877-4dd7-b694-52b017726242"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.749871 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "mysql-db") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.760946 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.784291 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "b6a277ac-73de-4e2b-b39f-73d467b2222c" (UID: "b6a277ac-73de-4e2b-b39f-73d467b2222c"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.829842 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830420 4784 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830456 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830469 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830482 4784 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830492 4784 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c98eb91-7877-4dd7-b694-52b017726242-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830504 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kj57t\" (UniqueName: \"kubernetes.io/projected/b6a277ac-73de-4e2b-b39f-73d467b2222c-kube-api-access-kj57t\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830514 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a277ac-73de-4e2b-b39f-73d467b2222c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.830534 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6a277ac-73de-4e2b-b39f-73d467b2222c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.854829 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 06 08:38:34 crc kubenswrapper[4784]: I0106 08:38:34.932384 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.170385 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6a277ac-73de-4e2b-b39f-73d467b2222c","Type":"ContainerDied","Data":"355be865b4877b40ac6bde96b2a5dd704306f244716ef6edd16465fad9a403dd"} Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.170443 4784 scope.go:117] "RemoveContainer" containerID="e9133ce89d3aa5addf5d0b1b6c3f09deddf66ba69d3abc4a37dfab0a890825aa" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.170611 4784 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.177021 4784 generic.go:334] "Generic (PLEG): container finished" podID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" containerID="4223cd132ca6515fc76e29d4b62d62f23ca0125b03efe4932036e3c3b22ddecd" exitCode=0 Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.177081 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c58bd8cfd-bjvgd" event={"ID":"47f75a1e-4d3b-4460-8420-05ac7e981c8e","Type":"ContainerDied","Data":"4223cd132ca6515fc76e29d4b62d62f23ca0125b03efe4932036e3c3b22ddecd"} Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.179731 4784 generic.go:334] "Generic (PLEG): container finished" podID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerID="d32e01d069e2a7fe432e20265968b48ead1ba6a001b6421c4e55bfdf12b10616" exitCode=0 Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.179763 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerDied","Data":"d32e01d069e2a7fe432e20265968b48ead1ba6a001b6421c4e55bfdf12b10616"} Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.188617 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1c98eb91-7877-4dd7-b694-52b017726242/ovn-northd/0.log" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.188741 4784 generic.go:334] "Generic (PLEG): container finished" podID="1c98eb91-7877-4dd7-b694-52b017726242" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" exitCode=139 Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.188784 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerDied","Data":"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c"} Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.188816 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1c98eb91-7877-4dd7-b694-52b017726242","Type":"ContainerDied","Data":"3ac7d055a3a2633de96c3ee58e08a4e27636b128faf21d74984d7e2fc3a9e22b"} Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.188854 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.239200 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.245696 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.304152 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5c58bd8cfd-bjvgd" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.306638 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.306731 4784 scope.go:117] "RemoveContainer" containerID="5906b3d46bac70f13bc27fb60444d1ee0a413a195f0184464f3dcff5699e7583" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.319773 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340485 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340563 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340640 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340663 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340687 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340737 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340769 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr2tg\" (UniqueName: \"kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.340795 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle\") pod \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\" (UID: \"47f75a1e-4d3b-4460-8420-05ac7e981c8e\") " Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.346387 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts" (OuterVolumeSpecName: "scripts") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.347691 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.347876 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.348032 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg" (OuterVolumeSpecName: "kube-api-access-jr2tg") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "kube-api-access-jr2tg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.374705 4784 scope.go:117] "RemoveContainer" containerID="3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.375443 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data" (OuterVolumeSpecName: "config-data") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.386833 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.391574 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.399819 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "47f75a1e-4d3b-4460-8420-05ac7e981c8e" (UID: "47f75a1e-4d3b-4460-8420-05ac7e981c8e"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.402842 4784 scope.go:117] "RemoveContainer" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.427675 4784 scope.go:117] "RemoveContainer" containerID="3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf" Jan 06 08:38:35 crc kubenswrapper[4784]: E0106 08:38:35.428361 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf\": container with ID starting with 3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf not found: ID does not exist" containerID="3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.428403 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf"} err="failed to get container status \"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf\": rpc error: code = NotFound desc = could not find container \"3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf\": container with ID starting with 3c30f92b8011e87722d0ed074d9c419ca54128be08ee18cd99b32d3ef8974baf not found: ID does not exist" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.428427 4784 scope.go:117] "RemoveContainer" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" Jan 06 08:38:35 crc kubenswrapper[4784]: E0106 08:38:35.428951 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c\": container with ID starting with b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c not found: ID does not exist" containerID="b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.428976 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c"} err="failed to get container status \"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c\": rpc error: code = NotFound desc = could not find container \"b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c\": container with ID starting with b511194f6c7ada2911837b1a60b5bf76e647db9e94944502d85a8f3198cb4b1c not found: ID does not exist" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443592 4784 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443625 4784 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443635 4784 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 
08:38:35.443646 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443655 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443666 4784 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443676 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr2tg\" (UniqueName: \"kubernetes.io/projected/47f75a1e-4d3b-4460-8420-05ac7e981c8e-kube-api-access-jr2tg\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.443685 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f75a1e-4d3b-4460-8420-05ac7e981c8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.582360 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647200 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cn7b\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647335 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647367 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647399 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647434 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647463 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647563 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647598 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647647 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647674 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.647727 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data\") pod \"41c89df0-d35f-4f47-86f3-71a2c0971d79\" (UID: \"41c89df0-d35f-4f47-86f3-71a2c0971d79\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.649485 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.661909 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b" (OuterVolumeSpecName: "kube-api-access-5cn7b") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "kube-api-access-5cn7b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.662947 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.666373 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.666743 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.680179 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data" (OuterVolumeSpecName: "config-data") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.682300 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.682735 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info" (OuterVolumeSpecName: "pod-info") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.706930 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.736685 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf" (OuterVolumeSpecName: "server-conf") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.751970 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752021 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752037 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752052 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cn7b\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-kube-api-access-5cn7b\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752065 4784 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/41c89df0-d35f-4f47-86f3-71a2c0971d79-pod-info\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752077 4784 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752092 4784 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/41c89df0-d35f-4f47-86f3-71a2c0971d79-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752140 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752163 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.752180 4784 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/41c89df0-d35f-4f47-86f3-71a2c0971d79-server-conf\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.779403 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.795757 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.803580 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "41c89df0-d35f-4f47-86f3-71a2c0971d79" (UID: "41c89df0-d35f-4f47-86f3-71a2c0971d79"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853119 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853182 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853247 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853281 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853311 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853372 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853402 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smlpk\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.853759 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854044 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854096 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854143 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854182 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854272 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd\") pod \"052ecaa6-58fd-42ed-b2c5-6b8919470619\" (UID: \"052ecaa6-58fd-42ed-b2c5-6b8919470619\") "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854744 4784 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854772 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854792 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/41c89df0-d35f-4f47-86f3-71a2c0971d79-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854804 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.854810 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.856765 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.856868 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.856985 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info" (OuterVolumeSpecName: "pod-info") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.857356 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.857971 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk" (OuterVolumeSpecName: "kube-api-access-smlpk") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "kube-api-access-smlpk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.874512 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data" (OuterVolumeSpecName: "config-data") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.894469 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf" (OuterVolumeSpecName: "server-conf") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.926920 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "052ecaa6-58fd-42ed-b2c5-6b8919470619" (UID: "052ecaa6-58fd-42ed-b2c5-6b8919470619"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956813 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956871 4784 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-server-conf\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956923 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956938 4784 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/052ecaa6-58fd-42ed-b2c5-6b8919470619-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956953 4784 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/052ecaa6-58fd-42ed-b2c5-6b8919470619-pod-info\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956967 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smlpk\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-kube-api-access-smlpk\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956981 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.956992 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/052ecaa6-58fd-42ed-b2c5-6b8919470619-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.957005 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/052ecaa6-58fd-42ed-b2c5-6b8919470619-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:35 crc kubenswrapper[4784]: I0106 08:38:35.978492 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.059731 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.200369 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.201228 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"41c89df0-d35f-4f47-86f3-71a2c0971d79","Type":"ContainerDied","Data":"47f899ad74f75b80ff11af11be7d255985dd06ec857a6ea9ad0b8b936409fcef"}
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.201302 4784 scope.go:117] "RemoveContainer" containerID="d32e01d069e2a7fe432e20265968b48ead1ba6a001b6421c4e55bfdf12b10616"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.201439 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.202062 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.202454 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.202503 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server"
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.204923 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.207011 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.208272 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.208365 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.216083 4784 generic.go:334] "Generic (PLEG): container finished" podID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerID="f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc" exitCode=0
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.216185 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerDied","Data":"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"}
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.216257 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"052ecaa6-58fd-42ed-b2c5-6b8919470619","Type":"ContainerDied","Data":"9dd22c7b81cfee4d0292f41912be1d5cc14198cc7a7057f44c9ea482f9c856cd"}
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.216333 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.218063 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c58bd8cfd-bjvgd" event={"ID":"47f75a1e-4d3b-4460-8420-05ac7e981c8e","Type":"ContainerDied","Data":"2f3bb16f1555024f2cc7a9259fc9047d35325a194ca9389b1c3085531a670606"}
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.218166 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5c58bd8cfd-bjvgd"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.224453 4784 scope.go:117] "RemoveContainer" containerID="bd03e04330f8e1c997eb9c5a6519ec44ddc8665c10bea822cf494fdb01acc628"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.259964 4784 scope.go:117] "RemoveContainer" containerID="f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.260889 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.270186 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.291925 4784 scope.go:117] "RemoveContainer" containerID="613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.297778 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.304638 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.310218 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.326163 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" path="/var/lib/kubelet/pods/052ecaa6-58fd-42ed-b2c5-6b8919470619/volumes"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.327161 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c98eb91-7877-4dd7-b694-52b017726242" path="/var/lib/kubelet/pods/1c98eb91-7877-4dd7-b694-52b017726242/volumes"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.328446 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" path="/var/lib/kubelet/pods/41c89df0-d35f-4f47-86f3-71a2c0971d79/volumes"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.329226 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" path="/var/lib/kubelet/pods/b6a277ac-73de-4e2b-b39f-73d467b2222c/volumes"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.329852 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5c58bd8cfd-bjvgd"]
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.367706 4784 scope.go:117] "RemoveContainer" containerID="f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.368271 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc\": container with ID starting with f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc not found: ID does not exist" containerID="f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.368309 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc"} err="failed to get container status \"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc\": rpc error: code = NotFound desc = could not find container \"f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc\": container with ID starting with f24ed2b7a2fbd1b9b4d0209b2b0448142937b3cd525d3833238d00d846deb5fc not found: ID does not exist"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.368336 4784 scope.go:117] "RemoveContainer" containerID="613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"
Jan 06 08:38:36 crc kubenswrapper[4784]: E0106 08:38:36.370307 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f\": container with ID starting with 613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f not found: ID does not exist" containerID="613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.370335 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f"} err="failed to get container status \"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f\": rpc error: code = NotFound desc = could not find container \"613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f\": container with ID starting with 613af1447384aa02c92ffc00120a9eb3d6a1362e2f325edc92e8fcc3a2447c9f not found: ID does not exist"
Jan 06 08:38:36 crc kubenswrapper[4784]: I0106 08:38:36.370356 4784 scope.go:117] "RemoveContainer" containerID="4223cd132ca6515fc76e29d4b62d62f23ca0125b03efe4932036e3c3b22ddecd"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.189613 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-55f595f44f-tzkkl"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.240157 4784 generic.go:334] "Generic (PLEG): container finished" podID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerID="e8dea317ca214f9ea6144a057e6bb7ef38cd17e2a3566ae30882d733e82bb07b" exitCode=0
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.240261 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerDied","Data":"e8dea317ca214f9ea6144a057e6bb7ef38cd17e2a3566ae30882d733e82bb07b"}
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.248392 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.250475 4784 generic.go:334] "Generic (PLEG): container finished" podID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerID="1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9" exitCode=0
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.250726 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-55f595f44f-tzkkl"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.250754 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerDied","Data":"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"}
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.250816 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-55f595f44f-tzkkl" event={"ID":"8a5f283f-3491-4531-8213-b2c0eb6b3fc8","Type":"ContainerDied","Data":"776394bc9292b8ab9260f991242f8bcb263bd670798e1d4459e43ebd4f76295c"}
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.250836 4784 scope.go:117] "RemoveContainer" containerID="1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.254125 4784 generic.go:334] "Generic (PLEG): container finished" podID="d1e87443-2d75-4063-934c-dc593d03987c" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902" exitCode=0
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.254205 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d1e87443-2d75-4063-934c-dc593d03987c","Type":"ContainerDied","Data":"69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902"}
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.266305 4784 generic.go:334] "Generic (PLEG): container finished" podID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerID="dc6835d04241d55d04ae4a35e417f32bdfac1a264f3e1266dbf5a6c0cea46b5d" exitCode=0
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.266365 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerDied","Data":"dc6835d04241d55d04ae4a35e417f32bdfac1a264f3e1266dbf5a6c0cea46b5d"}
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.299663 4784 scope.go:117] "RemoveContainer" containerID="1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.322417 4784 scope.go:117] "RemoveContainer" containerID="1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"
Jan 06 08:38:37 crc kubenswrapper[4784]: E0106 08:38:37.322961 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9\": container with ID starting with 1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9 not found: ID does not exist" containerID="1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.323004 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9"} err="failed to get container status \"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9\": rpc error: code = NotFound desc = could not find container \"1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9\": container with ID starting with 1acfc09bce1e2ea954234f8dce2d5022e0b122a502d59c3a110c2b596a5f22d9 not found: ID does not exist"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.323027 4784 scope.go:117] "RemoveContainer" containerID="1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"
Jan 06 08:38:37 crc kubenswrapper[4784]: E0106 08:38:37.323315 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28\": container with ID starting with 1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28 not found: ID does not exist" containerID="1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.323334 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28"} err="failed to get container status \"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28\": rpc error: code = NotFound desc = could not find container \"1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28\": container with ID starting with 1bf390988559ad3f54dd862a9c758b447b84e4f67a158e2cd3efd23826681c28 not found: ID does not exist"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388347 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs\") pod \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388385 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmtxg\" (UniqueName: \"kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg\") pod \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388426 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom\") pod \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388444 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs\") pod \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388464 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data\") pod \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388519 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle\") pod \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388595 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom\") pod \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388697 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q7qr\" (UniqueName: \"kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr\") pod \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388722 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data\") pod \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\" (UID: \"8a5f283f-3491-4531-8213-b2c0eb6b3fc8\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.388749 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle\") pod \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\" (UID: \"3ddaa7ef-b912-4b5b-9bfa-820818220eef\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.391949 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs" (OuterVolumeSpecName: "logs") pod "3ddaa7ef-b912-4b5b-9bfa-820818220eef" (UID: "3ddaa7ef-b912-4b5b-9bfa-820818220eef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.392797 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs" (OuterVolumeSpecName: "logs") pod "8a5f283f-3491-4531-8213-b2c0eb6b3fc8" (UID: "8a5f283f-3491-4531-8213-b2c0eb6b3fc8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.396105 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg" (OuterVolumeSpecName: "kube-api-access-tmtxg") pod "3ddaa7ef-b912-4b5b-9bfa-820818220eef" (UID: "3ddaa7ef-b912-4b5b-9bfa-820818220eef"). InnerVolumeSpecName "kube-api-access-tmtxg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.396139 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8a5f283f-3491-4531-8213-b2c0eb6b3fc8" (UID: "8a5f283f-3491-4531-8213-b2c0eb6b3fc8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.397949 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr" (OuterVolumeSpecName: "kube-api-access-2q7qr") pod "8a5f283f-3491-4531-8213-b2c0eb6b3fc8" (UID: "8a5f283f-3491-4531-8213-b2c0eb6b3fc8"). InnerVolumeSpecName "kube-api-access-2q7qr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.414755 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3ddaa7ef-b912-4b5b-9bfa-820818220eef" (UID: "3ddaa7ef-b912-4b5b-9bfa-820818220eef"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.424411 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a5f283f-3491-4531-8213-b2c0eb6b3fc8" (UID: "8a5f283f-3491-4531-8213-b2c0eb6b3fc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.443830 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ddaa7ef-b912-4b5b-9bfa-820818220eef" (UID: "3ddaa7ef-b912-4b5b-9bfa-820818220eef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.448961 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data" (OuterVolumeSpecName: "config-data") pod "3ddaa7ef-b912-4b5b-9bfa-820818220eef" (UID: "3ddaa7ef-b912-4b5b-9bfa-820818220eef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.458366 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data" (OuterVolumeSpecName: "config-data") pod "8a5f283f-3491-4531-8213-b2c0eb6b3fc8" (UID: "8a5f283f-3491-4531-8213-b2c0eb6b3fc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490529 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490594 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490605 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q7qr\" (UniqueName: \"kubernetes.io/projected/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-kube-api-access-2q7qr\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490618 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490629 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490639 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a5f283f-3491-4531-8213-b2c0eb6b3fc8-logs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490651 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmtxg\" (UniqueName: \"kubernetes.io/projected/3ddaa7ef-b912-4b5b-9bfa-820818220eef-kube-api-access-tmtxg\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490660 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490668 4784 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ddaa7ef-b912-4b5b-9bfa-820818220eef-logs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.490676 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ddaa7ef-b912-4b5b-9bfa-820818220eef-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.542344 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.660958 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"]
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.667744 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-55f595f44f-tzkkl"]
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694107 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694191 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5dt7\" (UniqueName: \"kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694235 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694272 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694310 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694373 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694397 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.694461 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle\") pod \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\" (UID: \"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d\") "
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.695264 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.699498 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts" (OuterVolumeSpecName: "scripts") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.699775 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7" (OuterVolumeSpecName: "kube-api-access-s5dt7") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "kube-api-access-s5dt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.712294 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.718415 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.765449 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.777166 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.786831 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data" (OuterVolumeSpecName: "config-data") pod "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" (UID: "5b9e9004-d4f9-4c7e-9346-e20c9f796d3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.795917 4784 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.795969 4784 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.795987 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.796000 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.796011 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5dt7\" (UniqueName: \"kubernetes.io/projected/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-kube-api-access-s5dt7\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.796025 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-scripts\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.796035 4784 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:37 crc kubenswrapper[4784]: I0106 08:38:37.796047 4784 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.110507 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.286244 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d1e87443-2d75-4063-934c-dc593d03987c","Type":"ContainerDied","Data":"2decc0eaea9f303f34954ab20b57f7c7061d9199bd2b71a622ab717b9ac810f0"}
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.286310 4784 scope.go:117] "RemoveContainer" containerID="69445d35f26da5bf10ec6be0ee60f62f15246b38e92ee1a9507d816397d15902"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.286483 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.293754 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b9e9004-d4f9-4c7e-9346-e20c9f796d3d","Type":"ContainerDied","Data":"35660d6fa6a070566dffa45ab763671bba14b1bead0045555e854f3245462de7"}
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.293809 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.297969 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk" event={"ID":"3ddaa7ef-b912-4b5b-9bfa-820818220eef","Type":"ContainerDied","Data":"3743ff4e4a25039cd231726fde85f8fc7183846905a4d555a3075282631d426a"}
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.298098 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.303110 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjlbw\" (UniqueName: \"kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw\") pod \"d1e87443-2d75-4063-934c-dc593d03987c\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") "
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.303206 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data\") pod \"d1e87443-2d75-4063-934c-dc593d03987c\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") "
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.303234 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle\") pod \"d1e87443-2d75-4063-934c-dc593d03987c\" (UID: \"d1e87443-2d75-4063-934c-dc593d03987c\") "
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.313807 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw" (OuterVolumeSpecName: "kube-api-access-tjlbw") pod "d1e87443-2d75-4063-934c-dc593d03987c" (UID: "d1e87443-2d75-4063-934c-dc593d03987c"). InnerVolumeSpecName "kube-api-access-tjlbw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.316231 4784 scope.go:117] "RemoveContainer" containerID="720bfe1553db1a3c166e22d70a377ca997d8451c2dc8326f20d8aeb1c6b818b6"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.327887 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" path="/var/lib/kubelet/pods/47f75a1e-4d3b-4460-8420-05ac7e981c8e/volumes"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.329113 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" path="/var/lib/kubelet/pods/8a5f283f-3491-4531-8213-b2c0eb6b3fc8/volumes"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.336760 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1e87443-2d75-4063-934c-dc593d03987c" (UID: "d1e87443-2d75-4063-934c-dc593d03987c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.351812 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"]
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.358397 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data" (OuterVolumeSpecName: "config-data") pod "d1e87443-2d75-4063-934c-dc593d03987c" (UID: "d1e87443-2d75-4063-934c-dc593d03987c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.367114 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-fd8f7c7f6-bnzsk"]
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.372415 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.374847 4784 scope.go:117] "RemoveContainer" containerID="7f0e5dcde65f104bcb37b1f44544e265a3b934ad96e3df20a22d72ff4674f3a0"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.379595 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.396848 4784 scope.go:117] "RemoveContainer" containerID="dc6835d04241d55d04ae4a35e417f32bdfac1a264f3e1266dbf5a6c0cea46b5d"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.405121 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjlbw\" (UniqueName: \"kubernetes.io/projected/d1e87443-2d75-4063-934c-dc593d03987c-kube-api-access-tjlbw\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.405210 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.405224 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1e87443-2d75-4063-934c-dc593d03987c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.419997 4784 scope.go:117] "RemoveContainer" containerID="4ef361b972f42bea1be49ec4ca483a7dbcd0419667a1513a9db6c46533072348"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.443832 4784 scope.go:117] "RemoveContainer" containerID="e8dea317ca214f9ea6144a057e6bb7ef38cd17e2a3566ae30882d733e82bb07b"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.488487 4784 scope.go:117] "RemoveContainer" containerID="18aae47178054e0d5a91d219ad4b268e6837bc33b481c8e49e5de6f2ab09b27f"
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.627178 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 06 08:38:38 crc kubenswrapper[4784]: I0106 08:38:38.634189 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 06 08:38:40 crc kubenswrapper[4784]: I0106 08:38:40.321828 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" path="/var/lib/kubelet/pods/3ddaa7ef-b912-4b5b-9bfa-820818220eef/volumes"
Jan 06 08:38:40 crc kubenswrapper[4784]: I0106 08:38:40.322951 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" path="/var/lib/kubelet/pods/5b9e9004-d4f9-4c7e-9346-e20c9f796d3d/volumes"
Jan 06 08:38:40 crc kubenswrapper[4784]: I0106 08:38:40.323670 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1e87443-2d75-4063-934c-dc593d03987c" path="/var/lib/kubelet/pods/d1e87443-2d75-4063-934c-dc593d03987c/volumes"
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.201704 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.202180 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.202808 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.202849 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server"
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.203397 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.205116 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.207128 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 06 08:38:41 crc kubenswrapper[4784]: E0106 08:38:41.207291 4784 prober.go:104] "Probe errored"
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:38:44 crc kubenswrapper[4784]: I0106 08:38:44.351741 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:38:44 crc kubenswrapper[4784]: I0106 08:38:44.352147 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:38:44 crc kubenswrapper[4784]: I0106 08:38:44.352203 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:38:44 crc kubenswrapper[4784]: I0106 08:38:44.352961 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:38:44 crc kubenswrapper[4784]: I0106 08:38:44.353012 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919" gracePeriod=600 Jan 06 08:38:45 crc kubenswrapper[4784]: I0106 08:38:45.405971 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919" exitCode=0 Jan 06 08:38:45 crc kubenswrapper[4784]: I0106 08:38:45.406116 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919"} Jan 06 08:38:45 crc kubenswrapper[4784]: I0106 08:38:45.406805 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509"} Jan 06 08:38:45 crc kubenswrapper[4784]: I0106 08:38:45.406834 4784 scope.go:117] "RemoveContainer" containerID="19653971273eef9ff17d8783cce6bbf50869f896c3dc99c1be3ca028e61421fd" Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.200929 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" 
containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.201284 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.201862 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.201899 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.204166 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.206260 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.208292 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:46 crc kubenswrapper[4784]: E0106 08:38:46.208372 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.202092 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" 
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.203788 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.204153 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.204430 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.204483 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.206445 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.209003 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:51 crc kubenswrapper[4784]: E0106 08:38:51.209043 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.201705 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:56 crc 
kubenswrapper[4784]: E0106 08:38:56.203955 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.204639 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.204702 4784 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.206687 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.208802 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.212126 4784 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 06 08:38:56 crc kubenswrapper[4784]: E0106 08:38:56.212210 4784 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2n9kz" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.548906 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2n9kz_da17dffd-4ff8-4df2-8701-2e910a4c5131/ovs-vswitchd/0.log" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.550705 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.603383 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2bc0281-fc27-4766-87fa-f16599938e96" containerID="47cf27a1d579d06ca4f6c0124e1a7218eaa708d4b9f10e03cf5124c1b88d16b7" exitCode=137 Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.603473 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"47cf27a1d579d06ca4f6c0124e1a7218eaa708d4b9f10e03cf5124c1b88d16b7"} Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.607152 4784 generic.go:334] "Generic (PLEG): container finished" podID="162189cc-1d37-4526-b83c-f36183f40b49" containerID="5d81264728e21f0de5897f691f16a0a600c9fc6f290f4b6c5b4cf42420927001" exitCode=137 Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.607215 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerDied","Data":"5d81264728e21f0de5897f691f16a0a600c9fc6f290f4b6c5b4cf42420927001"} Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.610613 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2n9kz_da17dffd-4ff8-4df2-8701-2e910a4c5131/ovs-vswitchd/0.log" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.612677 4784 generic.go:334] "Generic (PLEG): container finished" podID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" exitCode=137 Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.612713 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2n9kz" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.612739 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerDied","Data":"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab"} Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.612894 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2n9kz" event={"ID":"da17dffd-4ff8-4df2-8701-2e910a4c5131","Type":"ContainerDied","Data":"eb661efc6a61209fd0abdd310f5b4a6894bbc30a550cee838d40a6cc025f9b13"} Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.612934 4784 scope.go:117] "RemoveContainer" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.645411 4784 scope.go:117] "RemoveContainer" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674427 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpm56\" (UniqueName: \"kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674535 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674709 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674744 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674789 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674860 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run" (OuterVolumeSpecName: "var-run") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674886 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs\") pod \"da17dffd-4ff8-4df2-8701-2e910a4c5131\" (UID: \"da17dffd-4ff8-4df2-8701-2e910a4c5131\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674932 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674940 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log" (OuterVolumeSpecName: "var-log") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.674932 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib" (OuterVolumeSpecName: "var-lib") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.675754 4784 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-run\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.675781 4784 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-log\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.675789 4784 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-var-lib\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.675799 4784 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da17dffd-4ff8-4df2-8701-2e910a4c5131-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.676093 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts" (OuterVolumeSpecName: "scripts") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.681860 4784 scope.go:117] "RemoveContainer" containerID="26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.691854 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56" (OuterVolumeSpecName: "kube-api-access-bpm56") pod "da17dffd-4ff8-4df2-8701-2e910a4c5131" (UID: "da17dffd-4ff8-4df2-8701-2e910a4c5131"). InnerVolumeSpecName "kube-api-access-bpm56". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.776142 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.776983 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da17dffd-4ff8-4df2-8701-2e910a4c5131-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.777028 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpm56\" (UniqueName: \"kubernetes.io/projected/da17dffd-4ff8-4df2-8701-2e910a4c5131-kube-api-access-bpm56\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.800763 4784 scope.go:117] "RemoveContainer" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" Jan 06 08:38:57 crc kubenswrapper[4784]: E0106 08:38:57.801706 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab\": container with ID starting with 2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab not found: ID does not exist" containerID="2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.801776 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab"} err="failed to get container status \"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab\": rpc error: code = NotFound desc = could not find container \"2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab\": container with ID starting with 2e364af1d5ccefe56e2cf42d40703d1e8523ad0657dee0895cc92fe4fdb7a2ab not found: ID does not exist" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.801825 4784 scope.go:117] "RemoveContainer" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" Jan 06 08:38:57 crc kubenswrapper[4784]: E0106 08:38:57.802637 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83\": container with ID starting with d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 not found: ID does not exist" containerID="d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.802683 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83"} err="failed to get container 
status \"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83\": rpc error: code = NotFound desc = could not find container \"d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83\": container with ID starting with d2390e3ba9136849d0816a4f169032166ba427f2768f0427093f64386099cd83 not found: ID does not exist" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.802731 4784 scope.go:117] "RemoveContainer" containerID="26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c" Jan 06 08:38:57 crc kubenswrapper[4784]: E0106 08:38:57.803156 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c\": container with ID starting with 26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c not found: ID does not exist" containerID="26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.803190 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c"} err="failed to get container status \"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c\": rpc error: code = NotFound desc = could not find container \"26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c\": container with ID starting with 26b64c3522b89267f29da3ed866ae7f9fcf46409fd5357b324294ff85698d46c not found: ID does not exist" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.873265 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.877863 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data-custom\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.877938 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.878030 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dm6ht\" (UniqueName: \"kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.878065 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.878086 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc 
kubenswrapper[4784]: I0106 08:38:57.878108 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data\") pod \"162189cc-1d37-4526-b83c-f36183f40b49\" (UID: \"162189cc-1d37-4526-b83c-f36183f40b49\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.878624 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.882252 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.882812 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts" (OuterVolumeSpecName: "scripts") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.883969 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht" (OuterVolumeSpecName: "kube-api-access-dm6ht") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "kube-api-access-dm6ht". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.937043 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.955683 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"] Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.969605 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-2n9kz"] Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.975342 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data" (OuterVolumeSpecName: "config-data") pod "162189cc-1d37-4526-b83c-f36183f40b49" (UID: "162189cc-1d37-4526-b83c-f36183f40b49"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980019 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock\") pod \"a2bc0281-fc27-4766-87fa-f16599938e96\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980095 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a2bc0281-fc27-4766-87fa-f16599938e96\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980208 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache\") pod \"a2bc0281-fc27-4766-87fa-f16599938e96\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980251 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") pod \"a2bc0281-fc27-4766-87fa-f16599938e96\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980278 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg6c9\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9\") pod \"a2bc0281-fc27-4766-87fa-f16599938e96\" (UID: \"a2bc0281-fc27-4766-87fa-f16599938e96\") " Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980653 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dm6ht\" (UniqueName: \"kubernetes.io/projected/162189cc-1d37-4526-b83c-f36183f40b49-kube-api-access-dm6ht\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980682 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980698 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980698 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock" (OuterVolumeSpecName: "lock") pod "a2bc0281-fc27-4766-87fa-f16599938e96" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980713 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980784 4784 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/162189cc-1d37-4526-b83c-f36183f40b49-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.980799 4784 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/162189cc-1d37-4526-b83c-f36183f40b49-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.981259 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache" (OuterVolumeSpecName: "cache") pod "a2bc0281-fc27-4766-87fa-f16599938e96" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.984482 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9" (OuterVolumeSpecName: "kube-api-access-pg6c9") pod "a2bc0281-fc27-4766-87fa-f16599938e96" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96"). InnerVolumeSpecName "kube-api-access-pg6c9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.984649 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "swift") pod "a2bc0281-fc27-4766-87fa-f16599938e96" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 06 08:38:57 crc kubenswrapper[4784]: I0106 08:38:57.984835 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a2bc0281-fc27-4766-87fa-f16599938e96" (UID: "a2bc0281-fc27-4766-87fa-f16599938e96"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.082742 4784 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-lock\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.082809 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.082824 4784 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2bc0281-fc27-4766-87fa-f16599938e96-cache\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.082839 4784 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.082852 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg6c9\" (UniqueName: \"kubernetes.io/projected/a2bc0281-fc27-4766-87fa-f16599938e96-kube-api-access-pg6c9\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.098090 4784 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.184506 4784 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.334058 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" path="/var/lib/kubelet/pods/da17dffd-4ff8-4df2-8701-2e910a4c5131/volumes" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.437136 4784 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podd1dc9219-aca3-47c5-b8f7-37799235c2a9"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podd1dc9219-aca3-47c5-b8f7-37799235c2a9] : Timed out while waiting for systemd to remove kubepods-besteffort-podd1dc9219_aca3_47c5_b8f7_37799235c2a9.slice" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.638178 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a2bc0281-fc27-4766-87fa-f16599938e96","Type":"ContainerDied","Data":"43d68d465367327138f9318b678acd553a47ebe35728cf4ca49b45e2b8308e3c"} Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.638257 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.638257 4784 scope.go:117] "RemoveContainer" containerID="47cf27a1d579d06ca4f6c0124e1a7218eaa708d4b9f10e03cf5124c1b88d16b7" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.647003 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"162189cc-1d37-4526-b83c-f36183f40b49","Type":"ContainerDied","Data":"4de6166ea7bf2f0199cb7a4fddb9849d8c440f2ecf852e061c868b05bca6d4e1"} Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.647745 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.686728 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.687287 4784 scope.go:117] "RemoveContainer" containerID="4ba7f085163f761a926fc23b3df8baf41bed014e786fa75f064425ec412d6aac" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.694646 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.715517 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.726008 4784 scope.go:117] "RemoveContainer" containerID="92c958d9eb2eef729c21a59d1aa5cd51f0a60f0eee60721df4067e8956812f0d" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.729236 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.760606 4784 scope.go:117] "RemoveContainer" containerID="2782e4e954d402d8644c704c14fc8b38760f649dd39ca5a39f52b8e5a86c03a1" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.795021 4784 scope.go:117] "RemoveContainer" containerID="c87da68debafaf9f2a7c72f8afee9751c29efd20b0a9f522e25b46cbb9829297" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.826160 4784 scope.go:117] "RemoveContainer" containerID="7d3b4c93b777e722f9e32993854eee837ecfe60db1b929d1c3283452b9fc478a" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.855896 4784 scope.go:117] "RemoveContainer" containerID="048d0fdb57c0dd31a395544856c4165cd53c4598bcc0c60dcd03e55e8e8cb6bd" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.887779 4784 scope.go:117] "RemoveContainer" containerID="fdfc98f3c2d5c499ec740f96d17f27d1a06a4729300bbfc140e64fc4172f8f42" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.917170 4784 scope.go:117] "RemoveContainer" containerID="26e897885f00517a75035b5f5164eb8d210634e06114a25a61554dd7abccebce" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.951232 4784 scope.go:117] "RemoveContainer" containerID="bf572454575eb77381e725ef9250e47418c50e419d4f2e3931a28d2c8d07717d" Jan 06 08:38:58 crc kubenswrapper[4784]: I0106 08:38:58.976501 4784 scope.go:117] "RemoveContainer" containerID="869eb46b39bba54be94fbd147453143836362ba61706362d4c5f22a8bd537f78" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.003820 4784 scope.go:117] "RemoveContainer" containerID="4e841199dad3d57bec03b6ab32443378e9a39fb3254ba456545d280228b18564" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.034499 4784 scope.go:117] "RemoveContainer" containerID="0410f39e8c4c8bc197844907a33893de7c643c3e9ad4fa4c7538fe09ef8e89e5" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.066865 4784 
scope.go:117] "RemoveContainer" containerID="353a0302ae512e9895ae799f830966d4415b4ddf7909c1b5ffbcba497511d1de" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.104846 4784 scope.go:117] "RemoveContainer" containerID="6113f42bede3a27a0cb54b6277716f1e062747b6f20d0f6170df3e915df36563" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.145496 4784 scope.go:117] "RemoveContainer" containerID="254f5436d55af633a01d076f3a43e18f1370e7e15307f5c1d0c79c703303c8f9" Jan 06 08:38:59 crc kubenswrapper[4784]: I0106 08:38:59.172405 4784 scope.go:117] "RemoveContainer" containerID="5d81264728e21f0de5897f691f16a0a600c9fc6f290f4b6c5b4cf42420927001" Jan 06 08:39:00 crc kubenswrapper[4784]: I0106 08:39:00.330782 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="162189cc-1d37-4526-b83c-f36183f40b49" path="/var/lib/kubelet/pods/162189cc-1d37-4526-b83c-f36183f40b49/volumes" Jan 06 08:39:00 crc kubenswrapper[4784]: I0106 08:39:00.332897 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" path="/var/lib/kubelet/pods/a2bc0281-fc27-4766-87fa-f16599938e96/volumes" Jan 06 08:39:00 crc kubenswrapper[4784]: I0106 08:39:00.723981 4784 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podde90cd55-5430-48c7-b519-a4398e495607"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podde90cd55-5430-48c7-b519-a4398e495607] : Timed out while waiting for systemd to remove kubepods-besteffort-podde90cd55_5430_48c7_b519_a4398e495607.slice" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.419907 4784 scope.go:117] "RemoveContainer" containerID="90f90ea921b8da7940600a58d3af16d3747f97f35a9b4046001ef979cb52ee03" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.470898 4784 scope.go:117] "RemoveContainer" containerID="f76c7ab82f482cec8a852fe97900303168514f1417b4ff6c7482e190aee111f0" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.511442 4784 scope.go:117] "RemoveContainer" containerID="2da9f9b19996f605daa4e9a95f48246d59861d31537a6b9ba5feac254dd496e7" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.552415 4784 scope.go:117] "RemoveContainer" containerID="1353ca0bb2b5d2e016258e6808bc64f6f37476683b15af95e05da9eef1f4381e" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.591878 4784 scope.go:117] "RemoveContainer" containerID="9ec8b34270c8cb61ca7735cf44513c82b7b7b7a7970712874d5239863d652840" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.658762 4784 scope.go:117] "RemoveContainer" containerID="474a03023859df3ab17cd19508195c766002aabe9d1bfb5bf14278ae49ecd12b" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.681031 4784 scope.go:117] "RemoveContainer" containerID="6aed5d5bdb3e840d82bc70bce9e03cc3f9557785201d5d0f6120d8c4bbbe42b2" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.719148 4784 scope.go:117] "RemoveContainer" containerID="03dc15805f46528c7cb3d4d2d54a65a050d2a7e05316bc87de7d034099857e2d" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.753386 4784 scope.go:117] "RemoveContainer" containerID="8a4a02d92a8a711ca0ebd750090283fa911d0db029a5a0b4b70362192329d69d" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.780280 4784 scope.go:117] "RemoveContainer" containerID="2c45ac306d7abdef48f3e6ee949c4ec7dc07bfed1e934d3efbf3a711946837e2" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.814814 4784 scope.go:117] "RemoveContainer" 
containerID="6443e9dad677f700ab0c1e5c54226984deea521b5e8b2b7abf11054f95a416e2" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.837610 4784 scope.go:117] "RemoveContainer" containerID="e1117f40ed13f63cf96a0926c33e046c914afc8710bb6b882701784248e89db6" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.867351 4784 scope.go:117] "RemoveContainer" containerID="a57052782aaf0ace5ddc1d07ceff1112413c84255f703c8070bf5c16fd07cf08" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.889078 4784 scope.go:117] "RemoveContainer" containerID="1c912752fe46d84d9ed42c24893228b3462b90a1f34398c91345bd50a4f499de" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.905943 4784 scope.go:117] "RemoveContainer" containerID="03a6685581a4e19c9381b6823b522e80b5572a31d8881d1df2fd2be3a6a2817e" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.933875 4784 scope.go:117] "RemoveContainer" containerID="5c0e38915efc283eeec7d3f028569e5f0d8bb0816bdfbc43c74e9799b2ed11e1" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.957210 4784 scope.go:117] "RemoveContainer" containerID="b9c5949b3a85da1a9f1fb60412152f538ba8d8fc52bb6212a53de6e651a54857" Jan 06 08:40:12 crc kubenswrapper[4784]: I0106 08:40:12.990884 4784 scope.go:117] "RemoveContainer" containerID="d7d99f6e0d7bff947f92366dfc08c9c548a7c8b06a7d668594a1f293286f3f14" Jan 06 08:40:13 crc kubenswrapper[4784]: I0106 08:40:13.015110 4784 scope.go:117] "RemoveContainer" containerID="7aa2022b04e1d7e859677179f803d6836738f66967d19b38d26815b72e459e21" Jan 06 08:40:13 crc kubenswrapper[4784]: I0106 08:40:13.038685 4784 scope.go:117] "RemoveContainer" containerID="f266446de084888b268e8cd715a99fdc4b5092a328d89ca889e7c734c4e993cf" Jan 06 08:40:13 crc kubenswrapper[4784]: I0106 08:40:13.060822 4784 scope.go:117] "RemoveContainer" containerID="650817aab5852a278d03cc46d30381a8b5a0cbac71aad2d2e3d4d2f3c23bf6bd" Jan 06 08:40:44 crc kubenswrapper[4784]: I0106 08:40:44.351509 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:40:44 crc kubenswrapper[4784]: I0106 08:40:44.352222 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.200270 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"] Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201623 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201648 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201667 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="rsync" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201680 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" 
containerName="rsync" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201698 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="rabbitmq" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201713 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="rabbitmq" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201738 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-expirer" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201750 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-expirer" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201767 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1733b30-f3c1-414f-9140-f42583e97d31" containerName="kube-state-metrics" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201780 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1733b30-f3c1-414f-9140-f42583e97d31" containerName="kube-state-metrics" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201799 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201812 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201833 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-metadata" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201848 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-metadata" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201878 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201892 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201905 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201918 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201936 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201952 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.201971 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.201983 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" 
containerName="ovsdb-server" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202002 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="sg-core" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202015 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="sg-core" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202036 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202048 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202073 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-central-agent" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202089 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-central-agent" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202106 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server-init" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202120 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server-init" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202146 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202159 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-server" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202176 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202189 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202214 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202228 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202244 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="cinder-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202257 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="cinder-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202277 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" containerName="nova-cell0-conductor-conductor" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202290 4784 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" containerName="nova-cell0-conductor-conductor" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202305 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202318 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202340 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-reaper" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202353 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-reaper" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202377 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202390 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202411 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202424 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-server" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202442 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202454 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202478 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="rabbitmq" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202491 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="rabbitmq" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202507 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202521 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202541 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202578 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202595 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202608 
4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202628 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202643 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202670 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202684 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202710 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="mysql-bootstrap" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202723 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="mysql-bootstrap" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202744 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202758 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202778 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202792 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202814 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202827 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202847 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="openstack-network-exporter" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202861 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="openstack-network-exporter" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202881 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-notification-agent" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202896 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-notification-agent" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202916 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1e87443-2d75-4063-934c-dc593d03987c" 
containerName="nova-scheduler-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202930 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1e87443-2d75-4063-934c-dc593d03987c" containerName="nova-scheduler-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202957 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" containerName="keystone-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.202971 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" containerName="keystone-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.202990 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="setup-container" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203003 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c89df0-d35f-4f47-86f3-71a2c0971d79" containerName="setup-container" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203024 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203037 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203062 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="proxy-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203077 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="proxy-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203092 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203106 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203122 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203136 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203163 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203179 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203207 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203220 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203240 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" 
containerName="nova-api-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203253 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-api" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203270 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="setup-container" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203284 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="setup-container" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203305 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203318 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203333 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203347 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203363 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="probe" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203379 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="probe" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203394 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="swift-recon-cron" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203408 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="swift-recon-cron" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203435 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203449 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203467 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203481 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203501 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203515 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-log" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203538 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" 
containerName="memcached" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203576 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" containerName="memcached" Jan 06 08:40:56 crc kubenswrapper[4784]: E0106 08:40:56.203592 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203604 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203854 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="cinder-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203881 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="162189cc-1d37-4526-b83c-f36183f40b49" containerName="probe" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203901 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203925 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203945 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203962 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.203979 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204002 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204017 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204041 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-reaper" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204055 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-httpd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204069 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a5f283f-3491-4531-8213-b2c0eb6b3fc8" containerName="barbican-worker" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204091 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="sg-core" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204110 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204127 4784 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204145 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204167 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-central-agent" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204190 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-replicator" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204211 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a277ac-73de-4e2b-b39f-73d467b2222c" containerName="galera" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204230 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e94ed326-8f56-4933-8616-5814505b58f5" containerName="glance-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204249 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-log" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204265 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-updater" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204279 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-auditor" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204300 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204322 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="47f75a1e-4d3b-4460-8420-05ac7e981c8e" containerName="keystone-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204338 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-metadata" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204362 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="ovn-northd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204377 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovs-vswitchd" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204392 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd2b801-83a4-410f-a555-8dfda270713a" containerName="placement-api" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204414 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1e87443-2d75-4063-934c-dc593d03987c" containerName="nova-scheduler-scheduler" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204432 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="da17dffd-4ff8-4df2-8701-2e910a4c5131" containerName="ovsdb-server" Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204446 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="proxy-httpd" Jan 06 08:40:56 crc 
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204484 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ddaa7ef-b912-4b5b-9bfa-820818220eef" containerName="barbican-keystone-listener"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204498 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="19f8ed37-5996-433b-9915-97489c1d8f11" containerName="nova-api-log"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204514 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="052ecaa6-58fd-42ed-b2c5-6b8919470619" containerName="rabbitmq"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204527 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="rsync"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204574 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="79cac775-c143-4370-bf3b-b25e2ca62120" containerName="nova-metadata-log"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204597 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9e9004-d4f9-4c7e-9346-e20c9f796d3d" containerName="ceilometer-notification-agent"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204611 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1733b30-f3c1-414f-9140-f42583e97d31" containerName="kube-state-metrics"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204630 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-expirer"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204648 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-auditor"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204671 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3627acbf-1c12-4e8e-97f0-e44a6cd124c3" containerName="memcached"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204691 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="15cd1678-570e-47b5-bcb0-6745b8aa95cb" containerName="cinder-api-log"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204714 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="object-auditor"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204732 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="container-replicator"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204753 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="db32cfd9-0522-4dc5-b8fd-0cb61d08efd1" containerName="barbican-api-log"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204776 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="account-server"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204794 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bc0281-fc27-4766-87fa-f16599938e96" containerName="swift-recon-cron"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204811 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="fecd8c1e-482d-4469-a884-c357e0e66fe0" containerName="glance-log"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204823 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dd5733d-6502-4030-a012-be296b7d11c1" containerName="nova-cell0-conductor-conductor"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204842 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c98eb91-7877-4dd7-b694-52b017726242" containerName="openstack-network-exporter"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.204859 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed6a7b9-0069-4ea7-b813-70a5808d18db" containerName="neutron-httpd"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.206802 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.238995 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"]
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.317643 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.317999 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.318084 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt9vs\" (UniqueName: \"kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.419868 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.419948 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt9vs\" (UniqueName: \"kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.420021 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.420483 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.420611 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.447590 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt9vs\" (UniqueName: \"kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs\") pod \"redhat-marketplace-225gx\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") " pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:56 crc kubenswrapper[4784]: I0106 08:40:56.537410 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:40:57 crc kubenswrapper[4784]: I0106 08:40:57.041964 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"]
Jan 06 08:40:57 crc kubenswrapper[4784]: I0106 08:40:57.097023 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerStarted","Data":"4a3b952839afbfd894eec14630a075ef69bf480b7ea0079db2401b7551a44b15"}
Jan 06 08:40:58 crc kubenswrapper[4784]: I0106 08:40:58.110272 4784 generic.go:334] "Generic (PLEG): container finished" podID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerID="16d409543bd74263e917578849d0b54b9c2c5d72d080ddde02b505b82d66968c" exitCode=0
Jan 06 08:40:58 crc kubenswrapper[4784]: I0106 08:40:58.110793 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerDied","Data":"16d409543bd74263e917578849d0b54b9c2c5d72d080ddde02b505b82d66968c"}
Jan 06 08:40:59 crc kubenswrapper[4784]: I0106 08:40:59.126949 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerStarted","Data":"384c58ce53a834e7d476bfaf19432234773f2db0c0da7c5690ebda7fe3820b23"}
Jan 06 08:41:00 crc kubenswrapper[4784]: I0106 08:41:00.143972 4784 generic.go:334] "Generic (PLEG): container finished" podID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerID="384c58ce53a834e7d476bfaf19432234773f2db0c0da7c5690ebda7fe3820b23" exitCode=0
Jan 06 08:41:00 crc kubenswrapper[4784]: I0106 08:41:00.144060 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerDied","Data":"384c58ce53a834e7d476bfaf19432234773f2db0c0da7c5690ebda7fe3820b23"}
Jan 06 08:41:01 crc kubenswrapper[4784]: I0106 08:41:01.156944 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerStarted","Data":"7017ce5a8915554dc5cf88a2e7e3fba9f6ea76caee0bc561aecbc336d35a038a"}
Jan 06 08:41:01 crc kubenswrapper[4784]: I0106 08:41:01.187000 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-225gx" podStartSLOduration=2.666101221 podStartE2EDuration="5.186977047s" podCreationTimestamp="2026-01-06 08:40:56 +0000 UTC" firstStartedPulling="2026-01-06 08:40:58.113636688 +0000 UTC m=+1560.159809555" lastFinishedPulling="2026-01-06 08:41:00.634512514 +0000 UTC m=+1562.680685381" observedRunningTime="2026-01-06 08:41:01.180393032 +0000 UTC m=+1563.226565929" watchObservedRunningTime="2026-01-06 08:41:01.186977047 +0000 UTC m=+1563.233149884"
Jan 06 08:41:06 crc kubenswrapper[4784]: I0106 08:41:06.538513 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:06 crc kubenswrapper[4784]: I0106 08:41:06.540466 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:06 crc kubenswrapper[4784]: I0106 08:41:06.620309 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:07 crc kubenswrapper[4784]: I0106 08:41:07.291487 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:07 crc kubenswrapper[4784]: I0106 08:41:07.363292 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"]
Jan 06 08:41:09 crc kubenswrapper[4784]: I0106 08:41:09.239717 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-225gx" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="registry-server" containerID="cri-o://7017ce5a8915554dc5cf88a2e7e3fba9f6ea76caee0bc561aecbc336d35a038a" gracePeriod=2
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.255436 4784 generic.go:334] "Generic (PLEG): container finished" podID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerID="7017ce5a8915554dc5cf88a2e7e3fba9f6ea76caee0bc561aecbc336d35a038a" exitCode=0
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.255821 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerDied","Data":"7017ce5a8915554dc5cf88a2e7e3fba9f6ea76caee0bc561aecbc336d35a038a"}
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.255934 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-225gx" event={"ID":"cd39697c-67dd-4b73-80ed-ceddf832375a","Type":"ContainerDied","Data":"4a3b952839afbfd894eec14630a075ef69bf480b7ea0079db2401b7551a44b15"}
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.255963 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a3b952839afbfd894eec14630a075ef69bf480b7ea0079db2401b7551a44b15"
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.261433 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.347411 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt9vs\" (UniqueName: \"kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs\") pod \"cd39697c-67dd-4b73-80ed-ceddf832375a\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") "
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.347478 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content\") pod \"cd39697c-67dd-4b73-80ed-ceddf832375a\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") "
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.347727 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities\") pod \"cd39697c-67dd-4b73-80ed-ceddf832375a\" (UID: \"cd39697c-67dd-4b73-80ed-ceddf832375a\") "
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.349856 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities" (OuterVolumeSpecName: "utilities") pod "cd39697c-67dd-4b73-80ed-ceddf832375a" (UID: "cd39697c-67dd-4b73-80ed-ceddf832375a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.359026 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs" (OuterVolumeSpecName: "kube-api-access-gt9vs") pod "cd39697c-67dd-4b73-80ed-ceddf832375a" (UID: "cd39697c-67dd-4b73-80ed-ceddf832375a"). InnerVolumeSpecName "kube-api-access-gt9vs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.388789 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd39697c-67dd-4b73-80ed-ceddf832375a" (UID: "cd39697c-67dd-4b73-80ed-ceddf832375a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.449959 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt9vs\" (UniqueName: \"kubernetes.io/projected/cd39697c-67dd-4b73-80ed-ceddf832375a-kube-api-access-gt9vs\") on node \"crc\" DevicePath \"\""
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.450023 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 08:41:10 crc kubenswrapper[4784]: I0106 08:41:10.450044 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd39697c-67dd-4b73-80ed-ceddf832375a-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 08:41:11 crc kubenswrapper[4784]: I0106 08:41:11.267678 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-225gx"
Jan 06 08:41:11 crc kubenswrapper[4784]: I0106 08:41:11.322014 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"]
Jan 06 08:41:11 crc kubenswrapper[4784]: I0106 08:41:11.333333 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-225gx"]
Jan 06 08:41:12 crc kubenswrapper[4784]: I0106 08:41:12.328015 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" path="/var/lib/kubelet/pods/cd39697c-67dd-4b73-80ed-ceddf832375a/volumes"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.458009 4784 scope.go:117] "RemoveContainer" containerID="4054deb074a112412430df78be1ca870064ab339204c29f58878b697b84964a7"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.508853 4784 scope.go:117] "RemoveContainer" containerID="feed05ef6c38396759ec4e80765ca0e6db6cc4f47e245c22ac358cdc7ef876ba"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.554561 4784 scope.go:117] "RemoveContainer" containerID="d5c3b203e197556bb2274e3877734a79c06ae2fda8e58528211f22a31d41b9ad"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.617389 4784 scope.go:117] "RemoveContainer" containerID="2d24fcba65829c8263c5636ac218b33b703ab3269bed25bdf38a8d29dd40237d"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.681689 4784 scope.go:117] "RemoveContainer" containerID="253b84f8fc7e05254da0d6eb160f615eb44959aa74159d0d375915b64cc99b79"
Jan 06 08:41:13 crc kubenswrapper[4784]: I0106 08:41:13.716219 4784 scope.go:117] "RemoveContainer" containerID="3737d9c9a4f7a004d041d4ac5573b4a1033d115a765b7915b0da0f343e6743af"
Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.351791 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.351882 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.989524 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5m5zm"]
Jan 06 08:41:14 crc kubenswrapper[4784]: E0106 08:41:14.990116 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="extract-utilities"
Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.990136 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="extract-utilities"
Jan 06 08:41:14 crc kubenswrapper[4784]: E0106 08:41:14.990180 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="registry-server"
Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.990188 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="registry-server"
Jan 06 08:41:14 crc kubenswrapper[4784]: E0106 08:41:14.990204 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="extract-content"
"RemoveStaleState: removing container" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="extract-content" Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.990213 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="extract-content" Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.990410 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd39697c-67dd-4b73-80ed-ceddf832375a" containerName="registry-server" Jan 06 08:41:14 crc kubenswrapper[4784]: I0106 08:41:14.991692 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.014359 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5m5zm"] Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.039055 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rp9v\" (UniqueName: \"kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.039147 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.039334 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.141335 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.141431 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rp9v\" (UniqueName: \"kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.141465 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.142808 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.142969 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.175113 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rp9v\" (UniqueName: \"kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v\") pod \"community-operators-5m5zm\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.340062 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:15 crc kubenswrapper[4784]: I0106 08:41:15.986001 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5m5zm"] Jan 06 08:41:16 crc kubenswrapper[4784]: I0106 08:41:16.341792 4784 generic.go:334] "Generic (PLEG): container finished" podID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerID="1b72a5f259eacdd5047d0b8cfac9790a823ec8e8e8a5df1d47efbd10c22a40c5" exitCode=0 Jan 06 08:41:16 crc kubenswrapper[4784]: I0106 08:41:16.341869 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerDied","Data":"1b72a5f259eacdd5047d0b8cfac9790a823ec8e8e8a5df1d47efbd10c22a40c5"} Jan 06 08:41:16 crc kubenswrapper[4784]: I0106 08:41:16.341943 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerStarted","Data":"e46caabfd261d14915b8301120762606676fe67355ed8e04b75e529b87ed0b17"} Jan 06 08:41:18 crc kubenswrapper[4784]: I0106 08:41:18.366747 4784 generic.go:334] "Generic (PLEG): container finished" podID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerID="af90e1872924cdfe787bee15a3f72760cb1972a6693091d0657bcb24861abf9a" exitCode=0 Jan 06 08:41:18 crc kubenswrapper[4784]: I0106 08:41:18.366822 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerDied","Data":"af90e1872924cdfe787bee15a3f72760cb1972a6693091d0657bcb24861abf9a"} Jan 06 08:41:20 crc kubenswrapper[4784]: I0106 08:41:20.392763 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerStarted","Data":"59de288e700b80278d24dbadf7783b1aa5669e94a01963f9b601f09d3ae3df6d"} Jan 06 08:41:20 crc kubenswrapper[4784]: I0106 08:41:20.426116 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5m5zm" podStartSLOduration=3.594007944 podStartE2EDuration="6.426098292s" podCreationTimestamp="2026-01-06 08:41:14 +0000 UTC" firstStartedPulling="2026-01-06 08:41:16.34482741 +0000 UTC 
m=+1578.391000287" lastFinishedPulling="2026-01-06 08:41:19.176917758 +0000 UTC m=+1581.223090635" observedRunningTime="2026-01-06 08:41:20.422914883 +0000 UTC m=+1582.469087720" watchObservedRunningTime="2026-01-06 08:41:20.426098292 +0000 UTC m=+1582.472271129" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.633771 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.635974 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.703110 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.720581 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.720665 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.720958 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4flgz\" (UniqueName: \"kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.822384 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.822453 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.822515 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4flgz\" (UniqueName: \"kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.823200 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content\") pod \"certified-operators-77vgp\" (UID: 
\"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.823284 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.857079 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4flgz\" (UniqueName: \"kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz\") pod \"certified-operators-77vgp\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:23 crc kubenswrapper[4784]: I0106 08:41:23.973464 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:24 crc kubenswrapper[4784]: I0106 08:41:24.300533 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:24 crc kubenswrapper[4784]: I0106 08:41:24.491186 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerStarted","Data":"de0cf5cad1648ddfc5b071dc896f3e7d8b5e23c9cc289bf5fd381d03259458ae"} Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.341728 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.341799 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.419944 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.505090 4784 generic.go:334] "Generic (PLEG): container finished" podID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerID="98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567" exitCode=0 Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.505182 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerDied","Data":"98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567"} Jan 06 08:41:25 crc kubenswrapper[4784]: I0106 08:41:25.570377 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:27 crc kubenswrapper[4784]: I0106 08:41:27.530112 4784 generic.go:334] "Generic (PLEG): container finished" podID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerID="22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906" exitCode=0 Jan 06 08:41:27 crc kubenswrapper[4784]: I0106 08:41:27.530199 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerDied","Data":"22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906"} Jan 06 08:41:27 crc kubenswrapper[4784]: I0106 08:41:27.815297 4784 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5m5zm"] Jan 06 08:41:27 crc kubenswrapper[4784]: I0106 08:41:27.815908 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5m5zm" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="registry-server" containerID="cri-o://59de288e700b80278d24dbadf7783b1aa5669e94a01963f9b601f09d3ae3df6d" gracePeriod=2 Jan 06 08:41:28 crc kubenswrapper[4784]: I0106 08:41:28.545184 4784 generic.go:334] "Generic (PLEG): container finished" podID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerID="59de288e700b80278d24dbadf7783b1aa5669e94a01963f9b601f09d3ae3df6d" exitCode=0 Jan 06 08:41:28 crc kubenswrapper[4784]: I0106 08:41:28.545703 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerDied","Data":"59de288e700b80278d24dbadf7783b1aa5669e94a01963f9b601f09d3ae3df6d"} Jan 06 08:41:28 crc kubenswrapper[4784]: I0106 08:41:28.551634 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerStarted","Data":"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb"} Jan 06 08:41:28 crc kubenswrapper[4784]: I0106 08:41:28.590148 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-77vgp" podStartSLOduration=2.848514684 podStartE2EDuration="5.590118404s" podCreationTimestamp="2026-01-06 08:41:23 +0000 UTC" firstStartedPulling="2026-01-06 08:41:25.508683573 +0000 UTC m=+1587.554856440" lastFinishedPulling="2026-01-06 08:41:28.250287313 +0000 UTC m=+1590.296460160" observedRunningTime="2026-01-06 08:41:28.579739441 +0000 UTC m=+1590.625912358" watchObservedRunningTime="2026-01-06 08:41:28.590118404 +0000 UTC m=+1590.636291281" Jan 06 08:41:28 crc kubenswrapper[4784]: I0106 08:41:28.813831 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.010450 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content\") pod \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.010997 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rp9v\" (UniqueName: \"kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v\") pod \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.012484 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities\") pod \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\" (UID: \"1cbb5a43-4275-45f0-ab13-1aff4767ea16\") " Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.013198 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities" (OuterVolumeSpecName: "utilities") pod "1cbb5a43-4275-45f0-ab13-1aff4767ea16" (UID: "1cbb5a43-4275-45f0-ab13-1aff4767ea16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.025776 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v" (OuterVolumeSpecName: "kube-api-access-7rp9v") pod "1cbb5a43-4275-45f0-ab13-1aff4767ea16" (UID: "1cbb5a43-4275-45f0-ab13-1aff4767ea16"). InnerVolumeSpecName "kube-api-access-7rp9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.073120 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1cbb5a43-4275-45f0-ab13-1aff4767ea16" (UID: "1cbb5a43-4275-45f0-ab13-1aff4767ea16"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.113812 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.113871 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rp9v\" (UniqueName: \"kubernetes.io/projected/1cbb5a43-4275-45f0-ab13-1aff4767ea16-kube-api-access-7rp9v\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.113885 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cbb5a43-4275-45f0-ab13-1aff4767ea16-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.568075 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m5zm" event={"ID":"1cbb5a43-4275-45f0-ab13-1aff4767ea16","Type":"ContainerDied","Data":"e46caabfd261d14915b8301120762606676fe67355ed8e04b75e529b87ed0b17"} Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.568742 4784 scope.go:117] "RemoveContainer" containerID="59de288e700b80278d24dbadf7783b1aa5669e94a01963f9b601f09d3ae3df6d" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.569041 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5m5zm" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.614189 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5m5zm"] Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.615219 4784 scope.go:117] "RemoveContainer" containerID="af90e1872924cdfe787bee15a3f72760cb1972a6693091d0657bcb24861abf9a" Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.621333 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5m5zm"] Jan 06 08:41:29 crc kubenswrapper[4784]: I0106 08:41:29.639034 4784 scope.go:117] "RemoveContainer" containerID="1b72a5f259eacdd5047d0b8cfac9790a823ec8e8e8a5df1d47efbd10c22a40c5" Jan 06 08:41:30 crc kubenswrapper[4784]: I0106 08:41:30.334687 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" path="/var/lib/kubelet/pods/1cbb5a43-4275-45f0-ab13-1aff4767ea16/volumes" Jan 06 08:41:33 crc kubenswrapper[4784]: I0106 08:41:33.974097 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:33 crc kubenswrapper[4784]: I0106 08:41:33.974687 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:34 crc kubenswrapper[4784]: I0106 08:41:34.055349 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:34 crc kubenswrapper[4784]: I0106 08:41:34.690163 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:34 crc kubenswrapper[4784]: I0106 08:41:34.765634 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:36 crc kubenswrapper[4784]: I0106 08:41:36.642326 4784 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/certified-operators-77vgp" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="registry-server" containerID="cri-o://206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb" gracePeriod=2 Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.110771 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.257192 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities\") pod \"849c5fbe-80e5-415b-a302-d7a6f2527166\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.257413 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4flgz\" (UniqueName: \"kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz\") pod \"849c5fbe-80e5-415b-a302-d7a6f2527166\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.257593 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content\") pod \"849c5fbe-80e5-415b-a302-d7a6f2527166\" (UID: \"849c5fbe-80e5-415b-a302-d7a6f2527166\") " Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.259652 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities" (OuterVolumeSpecName: "utilities") pod "849c5fbe-80e5-415b-a302-d7a6f2527166" (UID: "849c5fbe-80e5-415b-a302-d7a6f2527166"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.269627 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz" (OuterVolumeSpecName: "kube-api-access-4flgz") pod "849c5fbe-80e5-415b-a302-d7a6f2527166" (UID: "849c5fbe-80e5-415b-a302-d7a6f2527166"). InnerVolumeSpecName "kube-api-access-4flgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.321276 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "849c5fbe-80e5-415b-a302-d7a6f2527166" (UID: "849c5fbe-80e5-415b-a302-d7a6f2527166"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.360458 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.360516 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4flgz\" (UniqueName: \"kubernetes.io/projected/849c5fbe-80e5-415b-a302-d7a6f2527166-kube-api-access-4flgz\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.360538 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/849c5fbe-80e5-415b-a302-d7a6f2527166-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.683236 4784 generic.go:334] "Generic (PLEG): container finished" podID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerID="206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb" exitCode=0 Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.683324 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerDied","Data":"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb"} Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.683381 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-77vgp" event={"ID":"849c5fbe-80e5-415b-a302-d7a6f2527166","Type":"ContainerDied","Data":"de0cf5cad1648ddfc5b071dc896f3e7d8b5e23c9cc289bf5fd381d03259458ae"} Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.683416 4784 scope.go:117] "RemoveContainer" containerID="206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.683477 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-77vgp" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.723378 4784 scope.go:117] "RemoveContainer" containerID="22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.746205 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.772513 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-77vgp"] Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.779410 4784 scope.go:117] "RemoveContainer" containerID="98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.803690 4784 scope.go:117] "RemoveContainer" containerID="206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb" Jan 06 08:41:37 crc kubenswrapper[4784]: E0106 08:41:37.804470 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb\": container with ID starting with 206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb not found: ID does not exist" containerID="206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.804523 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb"} err="failed to get container status \"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb\": rpc error: code = NotFound desc = could not find container \"206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb\": container with ID starting with 206ef5c6e823f9e9b6bc6140c67307a06db03adf314bc682dbbee10c015c2efb not found: ID does not exist" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.804585 4784 scope.go:117] "RemoveContainer" containerID="22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906" Jan 06 08:41:37 crc kubenswrapper[4784]: E0106 08:41:37.805131 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906\": container with ID starting with 22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906 not found: ID does not exist" containerID="22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.805165 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906"} err="failed to get container status \"22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906\": rpc error: code = NotFound desc = could not find container \"22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906\": container with ID starting with 22e61abc16e5fc03da35f3f2d10014941cb6ac8fc4d2972e3ead2d18bb580906 not found: ID does not exist" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.805188 4784 scope.go:117] "RemoveContainer" containerID="98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567" Jan 06 08:41:37 crc kubenswrapper[4784]: E0106 08:41:37.805772 4784 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567\": container with ID starting with 98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567 not found: ID does not exist" containerID="98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567" Jan 06 08:41:37 crc kubenswrapper[4784]: I0106 08:41:37.805836 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567"} err="failed to get container status \"98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567\": rpc error: code = NotFound desc = could not find container \"98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567\": container with ID starting with 98711f9a0ad622d4c31faf969134a16552453e1ee19214a8b530ebce2c9d3567 not found: ID does not exist" Jan 06 08:41:38 crc kubenswrapper[4784]: I0106 08:41:38.334869 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" path="/var/lib/kubelet/pods/849c5fbe-80e5-415b-a302-d7a6f2527166/volumes" Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.351198 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.352081 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.352165 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.353364 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.353473 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" gracePeriod=600 Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.763129 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" exitCode=0 Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.763207 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509"} Jan 06 08:41:44 crc kubenswrapper[4784]: I0106 08:41:44.763360 4784 scope.go:117] "RemoveContainer" containerID="a7659b10b1b4bc4ca6ce124339b33561daf47c43badfd76a44e91fdc1fbdd919" Jan 06 08:41:44 crc kubenswrapper[4784]: E0106 08:41:44.992089 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:41:45 crc kubenswrapper[4784]: I0106 08:41:45.779836 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:41:45 crc kubenswrapper[4784]: E0106 08:41:45.780278 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:42:00 crc kubenswrapper[4784]: I0106 08:42:00.312256 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:42:00 crc kubenswrapper[4784]: E0106 08:42:00.313332 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.253865 4784 scope.go:117] "RemoveContainer" containerID="c146bbd6766980179c44bb174dda179d82aa1f0d0e7eb63a3c52a015b45e5c48" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.293872 4784 scope.go:117] "RemoveContainer" containerID="ec7545f80161168cdb2642216f715a156c1c3b53b6c2e1f2fc5134b9972dd4a7" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.313791 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:42:14 crc kubenswrapper[4784]: E0106 08:42:14.314186 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.333044 4784 scope.go:117] "RemoveContainer" containerID="db525362445edf924375717f5d9fb23cedb69000cd20cfb43d1dff81eaa3c9ea" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.358269 4784 scope.go:117] "RemoveContainer" containerID="a50750221049fbec4ad07b99f4ba3153c6a3c671546f9b7f4d3478557d012fcc" 
Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.398077 4784 scope.go:117] "RemoveContainer" containerID="35dd0f46d6e5474f6abaa8d280c63667bdb43f425793cbafe842053332f49e3e" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.438042 4784 scope.go:117] "RemoveContainer" containerID="00bd19712a8124651f5eae66da5fb56d3422ce328886923af0f40f521d7f84fd" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.489157 4784 scope.go:117] "RemoveContainer" containerID="fa4f624305789bdc7c5eab95920d6cbcde6cab7a62a2b6c1f38605685bc60cd5" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.516040 4784 scope.go:117] "RemoveContainer" containerID="9f16d9ac58ab757de59707bee78657c17595d2a5b6c9e51b71adc35b285b71e5" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.539793 4784 scope.go:117] "RemoveContainer" containerID="f8ee637f6b50a818ba4066de18a62301be2e77b51f282c93e245f39888f43173" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.576202 4784 scope.go:117] "RemoveContainer" containerID="b9af3d9746a8c69e9c9fc27f1bb3bb1810ce7e665587ba466c9798a81218f2e4" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.604719 4784 scope.go:117] "RemoveContainer" containerID="9db8fbcec626656592924a06b0f3af9d0a8e07f257adf5223b12f6ac5c873b4f" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.629225 4784 scope.go:117] "RemoveContainer" containerID="e103569a68bc3df3344544a89d4f62943f452c3f5a46a6e83ca83cd04df94db5" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.651618 4784 scope.go:117] "RemoveContainer" containerID="290d01665ba80eec4d252487e3be4e06f7105f19aaec277d126da7d8f458b219" Jan 06 08:42:14 crc kubenswrapper[4784]: I0106 08:42:14.668507 4784 scope.go:117] "RemoveContainer" containerID="5eaffcc3fc842b86a3f57a8ce3fde89734b9bf87ec0b08a500a602f48a25ee64" Jan 06 08:42:29 crc kubenswrapper[4784]: I0106 08:42:29.312503 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:42:29 crc kubenswrapper[4784]: E0106 08:42:29.313780 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:42:44 crc kubenswrapper[4784]: I0106 08:42:44.313078 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:42:44 crc kubenswrapper[4784]: E0106 08:42:44.314090 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:42:56 crc kubenswrapper[4784]: I0106 08:42:56.312787 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:42:56 crc kubenswrapper[4784]: E0106 08:42:56.313786 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:43:08 crc kubenswrapper[4784]: I0106 08:43:08.319320 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:43:08 crc kubenswrapper[4784]: E0106 08:43:08.320343 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:43:14 crc kubenswrapper[4784]: I0106 08:43:14.898150 4784 scope.go:117] "RemoveContainer" containerID="ef4ed89dc708181ffc925d7e028fb5062c84cfa88d18f20a58a06f6a86020e18" Jan 06 08:43:14 crc kubenswrapper[4784]: I0106 08:43:14.957160 4784 scope.go:117] "RemoveContainer" containerID="73a2d7251a65406d6311221b1b3d82d4d2fe27e88330dcb6a69286e36feb9424" Jan 06 08:43:14 crc kubenswrapper[4784]: I0106 08:43:14.995519 4784 scope.go:117] "RemoveContainer" containerID="286e16d27fa94436ac1831d5cc52871c589d62a999f488353b1b1767a2d56d65" Jan 06 08:43:15 crc kubenswrapper[4784]: I0106 08:43:15.027116 4784 scope.go:117] "RemoveContainer" containerID="51c6be1566d7abfb917cf987fce43a6501285d641013f5c5b50a40999bed6c52" Jan 06 08:43:15 crc kubenswrapper[4784]: I0106 08:43:15.090970 4784 scope.go:117] "RemoveContainer" containerID="dbb791de4205d4d85966f3bf1f337e666a95a1d84c1e47b53f6433801fbb0b76" Jan 06 08:43:15 crc kubenswrapper[4784]: I0106 08:43:15.153302 4784 scope.go:117] "RemoveContainer" containerID="717f5ac3058902d9ecd453090dafa7bf034dbcf5d2373c74cb87d5e8ed0e9fa6" Jan 06 08:43:15 crc kubenswrapper[4784]: I0106 08:43:15.180223 4784 scope.go:117] "RemoveContainer" containerID="ecf83e1238473049fa9df0f598a521008a4c7dc6b181e9eaa22fa538a42a06a8" Jan 06 08:43:15 crc kubenswrapper[4784]: I0106 08:43:15.207662 4784 scope.go:117] "RemoveContainer" containerID="e3ec423b01120c4bc4f3d63760694fc3257341409d7c3c6a6974d5e14e19ff27" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.291800 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.296032 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="extract-content" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.296236 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="extract-content" Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.296397 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="extract-utilities" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.296525 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="extract-utilities" Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.296738 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="registry-server" Jan 06 08:43:21 crc 
kubenswrapper[4784]: I0106 08:43:21.296898 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="registry-server" Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.297055 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="extract-content" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.297179 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="extract-content" Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.297354 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="extract-utilities" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.297520 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="extract-utilities" Jan 06 08:43:21 crc kubenswrapper[4784]: E0106 08:43:21.297781 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="registry-server" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.297955 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="registry-server" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.298500 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cbb5a43-4275-45f0-ab13-1aff4767ea16" containerName="registry-server" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.298789 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="849c5fbe-80e5-415b-a302-d7a6f2527166" containerName="registry-server" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.301025 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.324727 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.468775 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.469054 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46mfv\" (UniqueName: \"kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.469132 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.570989 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.571438 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.571656 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46mfv\" (UniqueName: \"kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.571799 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.571952 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.600496 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-46mfv\" (UniqueName: \"kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv\") pod \"redhat-operators-m6wvv\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:21 crc kubenswrapper[4784]: I0106 08:43:21.695776 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.146911 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.312868 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:43:22 crc kubenswrapper[4784]: E0106 08:43:22.313232 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.853480 4784 generic.go:334] "Generic (PLEG): container finished" podID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerID="19663ac3ac860a1aed9fe68d97d660a9e17b81496a75a90e7dbd8bb1e6fcb5ee" exitCode=0 Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.853565 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerDied","Data":"19663ac3ac860a1aed9fe68d97d660a9e17b81496a75a90e7dbd8bb1e6fcb5ee"} Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.853885 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerStarted","Data":"ae4ebd5231494e4ff6177d3dd369df5c1e1e92afd52770ba5a63499d6b571c74"} Jan 06 08:43:22 crc kubenswrapper[4784]: I0106 08:43:22.856097 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 08:43:23 crc kubenswrapper[4784]: I0106 08:43:23.862771 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerStarted","Data":"fe8017f27675d06786e47c97ef8347f6e2549e3110840429c118a33a3702f43d"} Jan 06 08:43:24 crc kubenswrapper[4784]: I0106 08:43:24.874141 4784 generic.go:334] "Generic (PLEG): container finished" podID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerID="fe8017f27675d06786e47c97ef8347f6e2549e3110840429c118a33a3702f43d" exitCode=0 Jan 06 08:43:24 crc kubenswrapper[4784]: I0106 08:43:24.874204 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerDied","Data":"fe8017f27675d06786e47c97ef8347f6e2549e3110840429c118a33a3702f43d"} Jan 06 08:43:28 crc kubenswrapper[4784]: I0106 08:43:28.917298 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" 
event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerStarted","Data":"5bab5e9ee6b3d6e233dc9d6cceb472558cd03b33376fed065ebdf13623f5d849"} Jan 06 08:43:28 crc kubenswrapper[4784]: I0106 08:43:28.947918 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m6wvv" podStartSLOduration=3.791362348 podStartE2EDuration="7.947897272s" podCreationTimestamp="2026-01-06 08:43:21 +0000 UTC" firstStartedPulling="2026-01-06 08:43:22.855888179 +0000 UTC m=+1704.902061016" lastFinishedPulling="2026-01-06 08:43:27.012423103 +0000 UTC m=+1709.058595940" observedRunningTime="2026-01-06 08:43:28.947752858 +0000 UTC m=+1710.993925725" watchObservedRunningTime="2026-01-06 08:43:28.947897272 +0000 UTC m=+1710.994070119" Jan 06 08:43:31 crc kubenswrapper[4784]: I0106 08:43:31.696444 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:31 crc kubenswrapper[4784]: I0106 08:43:31.697232 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:32 crc kubenswrapper[4784]: I0106 08:43:32.746675 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m6wvv" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="registry-server" probeResult="failure" output=< Jan 06 08:43:32 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 08:43:32 crc kubenswrapper[4784]: > Jan 06 08:43:35 crc kubenswrapper[4784]: I0106 08:43:35.312217 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:43:35 crc kubenswrapper[4784]: E0106 08:43:35.312482 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:43:41 crc kubenswrapper[4784]: I0106 08:43:41.752610 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:41 crc kubenswrapper[4784]: I0106 08:43:41.812034 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:42 crc kubenswrapper[4784]: I0106 08:43:42.000715 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:43 crc kubenswrapper[4784]: I0106 08:43:43.058377 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m6wvv" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="registry-server" containerID="cri-o://5bab5e9ee6b3d6e233dc9d6cceb472558cd03b33376fed065ebdf13623f5d849" gracePeriod=2 Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.069601 4784 generic.go:334] "Generic (PLEG): container finished" podID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerID="5bab5e9ee6b3d6e233dc9d6cceb472558cd03b33376fed065ebdf13623f5d849" exitCode=0 Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.069647 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerDied","Data":"5bab5e9ee6b3d6e233dc9d6cceb472558cd03b33376fed065ebdf13623f5d849"} Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.633405 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.754382 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content\") pod \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.754456 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46mfv\" (UniqueName: \"kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv\") pod \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.754579 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities\") pod \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\" (UID: \"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c\") " Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.756475 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities" (OuterVolumeSpecName: "utilities") pod "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" (UID: "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.765206 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv" (OuterVolumeSpecName: "kube-api-access-46mfv") pod "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" (UID: "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c"). InnerVolumeSpecName "kube-api-access-46mfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.857393 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46mfv\" (UniqueName: \"kubernetes.io/projected/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-kube-api-access-46mfv\") on node \"crc\" DevicePath \"\"" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.857457 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.922082 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" (UID: "74cb7ffa-e9a4-48b9-aff5-3728797f8e8c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:43:44 crc kubenswrapper[4784]: I0106 08:43:44.959741 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.081031 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6wvv" event={"ID":"74cb7ffa-e9a4-48b9-aff5-3728797f8e8c","Type":"ContainerDied","Data":"ae4ebd5231494e4ff6177d3dd369df5c1e1e92afd52770ba5a63499d6b571c74"} Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.081099 4784 scope.go:117] "RemoveContainer" containerID="5bab5e9ee6b3d6e233dc9d6cceb472558cd03b33376fed065ebdf13623f5d849" Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.081120 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6wvv" Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.109865 4784 scope.go:117] "RemoveContainer" containerID="fe8017f27675d06786e47c97ef8347f6e2549e3110840429c118a33a3702f43d" Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.142084 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.147644 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m6wvv"] Jan 06 08:43:45 crc kubenswrapper[4784]: I0106 08:43:45.161774 4784 scope.go:117] "RemoveContainer" containerID="19663ac3ac860a1aed9fe68d97d660a9e17b81496a75a90e7dbd8bb1e6fcb5ee" Jan 06 08:43:46 crc kubenswrapper[4784]: I0106 08:43:46.328939 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" path="/var/lib/kubelet/pods/74cb7ffa-e9a4-48b9-aff5-3728797f8e8c/volumes" Jan 06 08:43:47 crc kubenswrapper[4784]: I0106 08:43:47.312348 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:43:47 crc kubenswrapper[4784]: E0106 08:43:47.313274 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:44:01 crc kubenswrapper[4784]: I0106 08:44:01.312428 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:44:01 crc kubenswrapper[4784]: E0106 08:44:01.313187 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:44:12 crc kubenswrapper[4784]: I0106 08:44:12.312694 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:44:12 crc kubenswrapper[4784]: E0106 08:44:12.315398 
4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:44:15 crc kubenswrapper[4784]: I0106 08:44:15.366873 4784 scope.go:117] "RemoveContainer" containerID="3323b77f708d877c321b69ac39bf8466a315e9fe763a648674fb29ca28122eac" Jan 06 08:44:26 crc kubenswrapper[4784]: I0106 08:44:26.312970 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:44:26 crc kubenswrapper[4784]: E0106 08:44:26.314348 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:44:37 crc kubenswrapper[4784]: I0106 08:44:37.312093 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:44:37 crc kubenswrapper[4784]: E0106 08:44:37.313163 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:44:48 crc kubenswrapper[4784]: I0106 08:44:48.319933 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:44:48 crc kubenswrapper[4784]: E0106 08:44:48.320839 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.164301 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg"] Jan 06 08:45:00 crc kubenswrapper[4784]: E0106 08:45:00.165981 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="extract-content" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.166011 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="extract-content" Jan 06 08:45:00 crc kubenswrapper[4784]: E0106 08:45:00.166048 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="extract-utilities" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.166063 4784 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="extract-utilities" Jan 06 08:45:00 crc kubenswrapper[4784]: E0106 08:45:00.166098 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="registry-server" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.166109 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="registry-server" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.166345 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="74cb7ffa-e9a4-48b9-aff5-3728797f8e8c" containerName="registry-server" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.167178 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.170055 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.170975 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.183852 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg"] Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.271401 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.271475 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5j5n\" (UniqueName: \"kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.271536 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.372765 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.372919 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: 
\"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.373001 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5j5n\" (UniqueName: \"kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.375592 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.386342 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.398431 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5j5n\" (UniqueName: \"kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n\") pod \"collect-profiles-29461485-kstgg\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.493494 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.773522 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg"] Jan 06 08:45:00 crc kubenswrapper[4784]: I0106 08:45:00.797456 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" event={"ID":"72c8f74c-6820-4a79-809e-52284e112277","Type":"ContainerStarted","Data":"f632b2ee17343596c9cbe134228db8ff569ca280cd3a1adf0e616384d276b3da"} Jan 06 08:45:01 crc kubenswrapper[4784]: I0106 08:45:01.818031 4784 generic.go:334] "Generic (PLEG): container finished" podID="72c8f74c-6820-4a79-809e-52284e112277" containerID="203c20b89f8715ee94c8fc711866de9ff21874a51f7bb10faad1778949049337" exitCode=0 Jan 06 08:45:01 crc kubenswrapper[4784]: I0106 08:45:01.818768 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" event={"ID":"72c8f74c-6820-4a79-809e-52284e112277","Type":"ContainerDied","Data":"203c20b89f8715ee94c8fc711866de9ff21874a51f7bb10faad1778949049337"} Jan 06 08:45:02 crc kubenswrapper[4784]: I0106 08:45:02.312368 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:45:02 crc kubenswrapper[4784]: E0106 08:45:02.312862 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.154335 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.217478 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume\") pod \"72c8f74c-6820-4a79-809e-52284e112277\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.218646 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume\") pod \"72c8f74c-6820-4a79-809e-52284e112277\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.218781 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5j5n\" (UniqueName: \"kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n\") pod \"72c8f74c-6820-4a79-809e-52284e112277\" (UID: \"72c8f74c-6820-4a79-809e-52284e112277\") " Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.219809 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume" (OuterVolumeSpecName: "config-volume") pod "72c8f74c-6820-4a79-809e-52284e112277" (UID: "72c8f74c-6820-4a79-809e-52284e112277"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.226833 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "72c8f74c-6820-4a79-809e-52284e112277" (UID: "72c8f74c-6820-4a79-809e-52284e112277"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.227390 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n" (OuterVolumeSpecName: "kube-api-access-d5j5n") pod "72c8f74c-6820-4a79-809e-52284e112277" (UID: "72c8f74c-6820-4a79-809e-52284e112277"). InnerVolumeSpecName "kube-api-access-d5j5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.320172 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c8f74c-6820-4a79-809e-52284e112277-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.320212 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c8f74c-6820-4a79-809e-52284e112277-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.320225 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5j5n\" (UniqueName: \"kubernetes.io/projected/72c8f74c-6820-4a79-809e-52284e112277-kube-api-access-d5j5n\") on node \"crc\" DevicePath \"\"" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.843610 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" event={"ID":"72c8f74c-6820-4a79-809e-52284e112277","Type":"ContainerDied","Data":"f632b2ee17343596c9cbe134228db8ff569ca280cd3a1adf0e616384d276b3da"} Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.843884 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f632b2ee17343596c9cbe134228db8ff569ca280cd3a1adf0e616384d276b3da" Jan 06 08:45:03 crc kubenswrapper[4784]: I0106 08:45:03.843681 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg" Jan 06 08:45:16 crc kubenswrapper[4784]: I0106 08:45:16.312670 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:45:16 crc kubenswrapper[4784]: E0106 08:45:16.315356 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:45:31 crc kubenswrapper[4784]: I0106 08:45:31.312487 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:45:31 crc kubenswrapper[4784]: E0106 08:45:31.313435 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:45:45 crc kubenswrapper[4784]: I0106 08:45:45.312483 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:45:45 crc kubenswrapper[4784]: E0106 08:45:45.313151 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:45:57 crc kubenswrapper[4784]: I0106 08:45:57.312442 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:45:57 crc kubenswrapper[4784]: E0106 08:45:57.314425 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:46:11 crc kubenswrapper[4784]: I0106 08:46:11.312940 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:46:11 crc kubenswrapper[4784]: E0106 08:46:11.314033 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:46:26 crc kubenswrapper[4784]: I0106 08:46:26.312331 4784 scope.go:117] 
"RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:46:26 crc kubenswrapper[4784]: E0106 08:46:26.313432 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:46:39 crc kubenswrapper[4784]: I0106 08:46:39.312926 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:46:39 crc kubenswrapper[4784]: E0106 08:46:39.313904 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:46:50 crc kubenswrapper[4784]: I0106 08:46:50.312942 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:46:50 crc kubenswrapper[4784]: I0106 08:46:50.818235 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c"} Jan 06 08:47:15 crc kubenswrapper[4784]: I0106 08:47:15.481609 4784 scope.go:117] "RemoveContainer" containerID="16d409543bd74263e917578849d0b54b9c2c5d72d080ddde02b505b82d66968c" Jan 06 08:47:15 crc kubenswrapper[4784]: I0106 08:47:15.525441 4784 scope.go:117] "RemoveContainer" containerID="7017ce5a8915554dc5cf88a2e7e3fba9f6ea76caee0bc561aecbc336d35a038a" Jan 06 08:47:15 crc kubenswrapper[4784]: I0106 08:47:15.553618 4784 scope.go:117] "RemoveContainer" containerID="384c58ce53a834e7d476bfaf19432234773f2db0c0da7c5690ebda7fe3820b23" Jan 06 08:49:14 crc kubenswrapper[4784]: I0106 08:49:14.351049 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:49:14 crc kubenswrapper[4784]: I0106 08:49:14.351923 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:49:44 crc kubenswrapper[4784]: I0106 08:49:44.351234 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:49:44 crc kubenswrapper[4784]: I0106 08:49:44.353766 4784 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.384636 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.385259 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.403187 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.405148 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.405245 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c" gracePeriod=600 Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.795583 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c" exitCode=0 Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.795787 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c"} Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.795976 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"} Jan 06 08:50:14 crc kubenswrapper[4784]: I0106 08:50:14.796011 4784 scope.go:117] "RemoveContainer" containerID="2f59549743fc30070077fea78ff562a726121e76ae877582746c24445e667509" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.374948 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:34 crc kubenswrapper[4784]: E0106 08:51:34.376362 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c8f74c-6820-4a79-809e-52284e112277" containerName="collect-profiles" Jan 06 08:51:34 
crc kubenswrapper[4784]: I0106 08:51:34.376392 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c8f74c-6820-4a79-809e-52284e112277" containerName="collect-profiles" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.376794 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="72c8f74c-6820-4a79-809e-52284e112277" containerName="collect-profiles" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.378951 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.393718 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.480100 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2rsx\" (UniqueName: \"kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.481351 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.481459 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.582351 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.582412 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.582449 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2rsx\" (UniqueName: \"kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.582917 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " 
pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.583085 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.605676 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2rsx\" (UniqueName: \"kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx\") pod \"community-operators-6gfcq\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:34 crc kubenswrapper[4784]: I0106 08:51:34.718827 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:35 crc kubenswrapper[4784]: I0106 08:51:35.234434 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:35 crc kubenswrapper[4784]: I0106 08:51:35.743184 4784 generic.go:334] "Generic (PLEG): container finished" podID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerID="d55dbf5da23d9fe251f278da9a86f9301b94c1ed25d8ffd17b472e27f57797a2" exitCode=0 Jan 06 08:51:35 crc kubenswrapper[4784]: I0106 08:51:35.743245 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerDied","Data":"d55dbf5da23d9fe251f278da9a86f9301b94c1ed25d8ffd17b472e27f57797a2"} Jan 06 08:51:35 crc kubenswrapper[4784]: I0106 08:51:35.743286 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerStarted","Data":"9cd052f4aabcc5582769cf641568a7346862f455c61020480a047e447e05b8f1"} Jan 06 08:51:35 crc kubenswrapper[4784]: I0106 08:51:35.748134 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 08:51:36 crc kubenswrapper[4784]: I0106 08:51:36.753744 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerStarted","Data":"c2b99f9f00f2816ccb2bda1d15f1c105fd7c2e14dc05aed9c8b5ac6577b6dcae"} Jan 06 08:51:37 crc kubenswrapper[4784]: I0106 08:51:37.764067 4784 generic.go:334] "Generic (PLEG): container finished" podID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerID="c2b99f9f00f2816ccb2bda1d15f1c105fd7c2e14dc05aed9c8b5ac6577b6dcae" exitCode=0 Jan 06 08:51:37 crc kubenswrapper[4784]: I0106 08:51:37.764149 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerDied","Data":"c2b99f9f00f2816ccb2bda1d15f1c105fd7c2e14dc05aed9c8b5ac6577b6dcae"} Jan 06 08:51:38 crc kubenswrapper[4784]: I0106 08:51:38.776517 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerStarted","Data":"80f95e9c1f05080d119c17ca68de68cdc47af7cea6d6f9788a9e989cf1ecb23c"} Jan 06 08:51:38 crc kubenswrapper[4784]: I0106 08:51:38.808364 
4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6gfcq" podStartSLOduration=2.351578966 podStartE2EDuration="4.808342492s" podCreationTimestamp="2026-01-06 08:51:34 +0000 UTC" firstStartedPulling="2026-01-06 08:51:35.747945483 +0000 UTC m=+2197.794118320" lastFinishedPulling="2026-01-06 08:51:38.204708969 +0000 UTC m=+2200.250881846" observedRunningTime="2026-01-06 08:51:38.803839973 +0000 UTC m=+2200.850012840" watchObservedRunningTime="2026-01-06 08:51:38.808342492 +0000 UTC m=+2200.854515359" Jan 06 08:51:44 crc kubenswrapper[4784]: I0106 08:51:44.719012 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:44 crc kubenswrapper[4784]: I0106 08:51:44.719533 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:44 crc kubenswrapper[4784]: I0106 08:51:44.770213 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:44 crc kubenswrapper[4784]: I0106 08:51:44.888649 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:45 crc kubenswrapper[4784]: I0106 08:51:45.005173 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:46 crc kubenswrapper[4784]: I0106 08:51:46.840507 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6gfcq" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="registry-server" containerID="cri-o://80f95e9c1f05080d119c17ca68de68cdc47af7cea6d6f9788a9e989cf1ecb23c" gracePeriod=2 Jan 06 08:51:47 crc kubenswrapper[4784]: I0106 08:51:47.852852 4784 generic.go:334] "Generic (PLEG): container finished" podID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerID="80f95e9c1f05080d119c17ca68de68cdc47af7cea6d6f9788a9e989cf1ecb23c" exitCode=0 Jan 06 08:51:47 crc kubenswrapper[4784]: I0106 08:51:47.852919 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerDied","Data":"80f95e9c1f05080d119c17ca68de68cdc47af7cea6d6f9788a9e989cf1ecb23c"} Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.387788 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.489063 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities\") pod \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.489211 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content\") pod \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.489290 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2rsx\" (UniqueName: \"kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx\") pod \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\" (UID: \"b150a616-6c00-43e0-aa3b-c7f14a11c0ab\") " Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.490459 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities" (OuterVolumeSpecName: "utilities") pod "b150a616-6c00-43e0-aa3b-c7f14a11c0ab" (UID: "b150a616-6c00-43e0-aa3b-c7f14a11c0ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.495363 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx" (OuterVolumeSpecName: "kube-api-access-l2rsx") pod "b150a616-6c00-43e0-aa3b-c7f14a11c0ab" (UID: "b150a616-6c00-43e0-aa3b-c7f14a11c0ab"). InnerVolumeSpecName "kube-api-access-l2rsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.548792 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b150a616-6c00-43e0-aa3b-c7f14a11c0ab" (UID: "b150a616-6c00-43e0-aa3b-c7f14a11c0ab"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.591273 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.591314 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.591330 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2rsx\" (UniqueName: \"kubernetes.io/projected/b150a616-6c00-43e0-aa3b-c7f14a11c0ab-kube-api-access-l2rsx\") on node \"crc\" DevicePath \"\"" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.864832 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gfcq" event={"ID":"b150a616-6c00-43e0-aa3b-c7f14a11c0ab","Type":"ContainerDied","Data":"9cd052f4aabcc5582769cf641568a7346862f455c61020480a047e447e05b8f1"} Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.864902 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gfcq" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.864924 4784 scope.go:117] "RemoveContainer" containerID="80f95e9c1f05080d119c17ca68de68cdc47af7cea6d6f9788a9e989cf1ecb23c" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.904896 4784 scope.go:117] "RemoveContainer" containerID="c2b99f9f00f2816ccb2bda1d15f1c105fd7c2e14dc05aed9c8b5ac6577b6dcae" Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.920270 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.936068 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6gfcq"] Jan 06 08:51:48 crc kubenswrapper[4784]: I0106 08:51:48.942501 4784 scope.go:117] "RemoveContainer" containerID="d55dbf5da23d9fe251f278da9a86f9301b94c1ed25d8ffd17b472e27f57797a2" Jan 06 08:51:50 crc kubenswrapper[4784]: I0106 08:51:50.324661 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" path="/var/lib/kubelet/pods/b150a616-6c00-43e0-aa3b-c7f14a11c0ab/volumes" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.439761 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:05 crc kubenswrapper[4784]: E0106 08:52:05.442094 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="registry-server" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.442124 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="registry-server" Jan 06 08:52:05 crc kubenswrapper[4784]: E0106 08:52:05.442176 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="extract-content" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.442190 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="extract-content" Jan 06 08:52:05 crc kubenswrapper[4784]: E0106 08:52:05.442204 4784 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="extract-utilities" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.442217 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="extract-utilities" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.442442 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b150a616-6c00-43e0-aa3b-c7f14a11c0ab" containerName="registry-server" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.445408 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.465919 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.580193 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.580608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfx4s\" (UniqueName: \"kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.580662 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.682074 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.682143 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfx4s\" (UniqueName: \"kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.682168 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.682631 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.682642 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.707854 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfx4s\" (UniqueName: \"kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s\") pod \"redhat-marketplace-jv94t\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:05 crc kubenswrapper[4784]: I0106 08:52:05.778452 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:06 crc kubenswrapper[4784]: I0106 08:52:06.282274 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:07 crc kubenswrapper[4784]: I0106 08:52:07.035929 4784 generic.go:334] "Generic (PLEG): container finished" podID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerID="61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366" exitCode=0 Jan 06 08:52:07 crc kubenswrapper[4784]: I0106 08:52:07.035986 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerDied","Data":"61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366"} Jan 06 08:52:07 crc kubenswrapper[4784]: I0106 08:52:07.036020 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerStarted","Data":"2c0f55d30fca10375541b72c228807f7e4a7b34ab4c90dbec3456f5fb056ae30"} Jan 06 08:52:09 crc kubenswrapper[4784]: I0106 08:52:09.080713 4784 generic.go:334] "Generic (PLEG): container finished" podID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerID="8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a" exitCode=0 Jan 06 08:52:09 crc kubenswrapper[4784]: I0106 08:52:09.080866 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerDied","Data":"8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a"} Jan 06 08:52:10 crc kubenswrapper[4784]: I0106 08:52:10.091079 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerStarted","Data":"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2"} Jan 06 08:52:10 crc kubenswrapper[4784]: I0106 08:52:10.127777 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jv94t" podStartSLOduration=2.6315702930000002 podStartE2EDuration="5.127747985s" podCreationTimestamp="2026-01-06 08:52:05 +0000 UTC" firstStartedPulling="2026-01-06 08:52:07.039003744 +0000 UTC 
m=+2229.085176601" lastFinishedPulling="2026-01-06 08:52:09.535181446 +0000 UTC m=+2231.581354293" observedRunningTime="2026-01-06 08:52:10.117776094 +0000 UTC m=+2232.163948941" watchObservedRunningTime="2026-01-06 08:52:10.127747985 +0000 UTC m=+2232.173920862" Jan 06 08:52:14 crc kubenswrapper[4784]: I0106 08:52:14.351220 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:52:14 crc kubenswrapper[4784]: I0106 08:52:14.351582 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:52:15 crc kubenswrapper[4784]: I0106 08:52:15.778637 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:15 crc kubenswrapper[4784]: I0106 08:52:15.779140 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:15 crc kubenswrapper[4784]: I0106 08:52:15.844814 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:16 crc kubenswrapper[4784]: I0106 08:52:16.188474 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:16 crc kubenswrapper[4784]: I0106 08:52:16.243211 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:18 crc kubenswrapper[4784]: I0106 08:52:18.158712 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jv94t" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="registry-server" containerID="cri-o://9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2" gracePeriod=2 Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.132993 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.168528 4784 generic.go:334] "Generic (PLEG): container finished" podID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerID="9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2" exitCode=0 Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.168588 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerDied","Data":"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2"} Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.168611 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jv94t" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.168628 4784 scope.go:117] "RemoveContainer" containerID="9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.168616 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jv94t" event={"ID":"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12","Type":"ContainerDied","Data":"2c0f55d30fca10375541b72c228807f7e4a7b34ab4c90dbec3456f5fb056ae30"} Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.197053 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities\") pod \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.197455 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content\") pod \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.197523 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfx4s\" (UniqueName: \"kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s\") pod \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\" (UID: \"fe75f9a1-551c-47c1-b4a9-4b9091dc0b12\") " Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.198472 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities" (OuterVolumeSpecName: "utilities") pod "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" (UID: "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.202842 4784 scope.go:117] "RemoveContainer" containerID="8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.204050 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s" (OuterVolumeSpecName: "kube-api-access-qfx4s") pod "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" (UID: "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12"). InnerVolumeSpecName "kube-api-access-qfx4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.224481 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" (UID: "fe75f9a1-551c-47c1-b4a9-4b9091dc0b12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.251193 4784 scope.go:117] "RemoveContainer" containerID="61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.281374 4784 scope.go:117] "RemoveContainer" containerID="9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2" Jan 06 08:52:19 crc kubenswrapper[4784]: E0106 08:52:19.282193 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2\": container with ID starting with 9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2 not found: ID does not exist" containerID="9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.282262 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2"} err="failed to get container status \"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2\": rpc error: code = NotFound desc = could not find container \"9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2\": container with ID starting with 9998474462b45162d15c01aa3d24c00cc9d1061c8c1bd1b58460c3cd6f3667c2 not found: ID does not exist" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.282303 4784 scope.go:117] "RemoveContainer" containerID="8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a" Jan 06 08:52:19 crc kubenswrapper[4784]: E0106 08:52:19.282882 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a\": container with ID starting with 8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a not found: ID does not exist" containerID="8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.282941 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a"} err="failed to get container status \"8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a\": rpc error: code = NotFound desc = could not find container \"8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a\": container with ID starting with 8ae7a59dcd359dafa52c00cbccdfcd6c5ff028dfa34e9dea4c89a8cf37364f0a not found: ID does not exist" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.282973 4784 scope.go:117] "RemoveContainer" containerID="61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366" Jan 06 08:52:19 crc kubenswrapper[4784]: E0106 08:52:19.283399 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366\": container with ID starting with 61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366 not found: ID does not exist" containerID="61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.283447 4784 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366"} err="failed to get container status \"61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366\": rpc error: code = NotFound desc = could not find container \"61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366\": container with ID starting with 61fb61b72c6eb4708090992ff771eded8eaf1c8dfc95fd21d4c1fffa4e646366 not found: ID does not exist" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.299743 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfx4s\" (UniqueName: \"kubernetes.io/projected/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-kube-api-access-qfx4s\") on node \"crc\" DevicePath \"\"" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.299798 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.299838 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.528684 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:19 crc kubenswrapper[4784]: I0106 08:52:19.538119 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jv94t"] Jan 06 08:52:20 crc kubenswrapper[4784]: I0106 08:52:20.327325 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" path="/var/lib/kubelet/pods/fe75f9a1-551c-47c1-b4a9-4b9091dc0b12/volumes" Jan 06 08:52:44 crc kubenswrapper[4784]: I0106 08:52:44.351769 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:52:44 crc kubenswrapper[4784]: I0106 08:52:44.352366 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.351050 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.352750 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.352840 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.353644 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.353738 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" gracePeriod=600 Jan 06 08:53:14 crc kubenswrapper[4784]: E0106 08:53:14.484516 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.699754 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" exitCode=0 Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.699831 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"} Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.699920 4784 scope.go:117] "RemoveContainer" containerID="5d7e0e6d10beb71fc228c1b864d429bdf9c83a9fafb4fc89506bad5187ab434c" Jan 06 08:53:14 crc kubenswrapper[4784]: I0106 08:53:14.701019 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:53:14 crc kubenswrapper[4784]: E0106 08:53:14.701589 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:53:28 crc kubenswrapper[4784]: I0106 08:53:28.331363 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:53:28 crc kubenswrapper[4784]: E0106 08:53:28.332477 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:53:40 crc kubenswrapper[4784]: I0106 
Jan 06 08:53:40 crc kubenswrapper[4784]: I0106 08:53:40.312909 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"
Jan 06 08:53:40 crc kubenswrapper[4784]: E0106 08:53:40.313659 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.253379 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"]
Jan 06 08:53:51 crc kubenswrapper[4784]: E0106 08:53:51.254989 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="extract-content"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.255182 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="extract-content"
Jan 06 08:53:51 crc kubenswrapper[4784]: E0106 08:53:51.255205 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="registry-server"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.255217 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="registry-server"
Jan 06 08:53:51 crc kubenswrapper[4784]: E0106 08:53:51.255263 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="extract-utilities"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.255277 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="extract-utilities"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.255517 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe75f9a1-551c-47c1-b4a9-4b9091dc0b12" containerName="registry-server"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.257408 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.273257 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"]
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.389483 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.389778 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.389859 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wzmm\" (UniqueName: \"kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.490905 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.490978 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wzmm\" (UniqueName: \"kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.491037 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.491682 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.491747 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.515690 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wzmm\" (UniqueName: \"kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm\") pod \"redhat-operators-fxscp\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:51 crc kubenswrapper[4784]: I0106 08:53:51.638397 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:53:52 crc kubenswrapper[4784]: I0106 08:53:52.116403 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"]
Jan 06 08:53:53 crc kubenswrapper[4784]: I0106 08:53:53.066139 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerID="d3d8540aded578b58aa58fbdf29905af4299955518a98a4b414dacf85856725b" exitCode=0
Jan 06 08:53:53 crc kubenswrapper[4784]: I0106 08:53:53.066292 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerDied","Data":"d3d8540aded578b58aa58fbdf29905af4299955518a98a4b414dacf85856725b"}
Jan 06 08:53:53 crc kubenswrapper[4784]: I0106 08:53:53.066706 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerStarted","Data":"34cc880050479aecfca75d97fd0ededaadcc0caafac3238c2e945f0c5e5a737c"}
Jan 06 08:53:54 crc kubenswrapper[4784]: I0106 08:53:54.312711 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"
Jan 06 08:53:54 crc kubenswrapper[4784]: E0106 08:53:54.313672 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 08:53:55 crc kubenswrapper[4784]: I0106 08:53:55.087129 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerID="1556086ccc612aa564611b6d7d8d5465665e52c9bc252f1bb534290d45974d57" exitCode=0
Jan 06 08:53:55 crc kubenswrapper[4784]: I0106 08:53:55.087177 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerDied","Data":"1556086ccc612aa564611b6d7d8d5465665e52c9bc252f1bb534290d45974d57"}
Jan 06 08:53:56 crc kubenswrapper[4784]: I0106 08:53:56.098753 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerStarted","Data":"8b05bc1f13c8684d84a204ea94f2b3e2e35a777973fd01629dedaa28889ae5a2"}
Jan 06 08:53:56 crc kubenswrapper[4784]: I0106 08:53:56.125615 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fxscp" podStartSLOduration=2.570844961 podStartE2EDuration="5.125497029s" podCreationTimestamp="2026-01-06 08:53:51 +0000 UTC" firstStartedPulling="2026-01-06 08:53:53.068706882 +0000 UTC m=+2335.114879719" lastFinishedPulling="2026-01-06 08:53:55.62335892 +0000 UTC m=+2337.669531787" observedRunningTime="2026-01-06 08:53:56.124481527 +0000 UTC m=+2338.170654374" watchObservedRunningTime="2026-01-06 08:53:56.125497029 +0000 UTC m=+2338.171669946"
Jan 06 08:54:01 crc kubenswrapper[4784]: I0106 08:54:01.638731 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:54:01 crc kubenswrapper[4784]: I0106 08:54:01.639417 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:54:02 crc kubenswrapper[4784]: I0106 08:54:02.699651 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fxscp" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="registry-server" probeResult="failure" output=<
Jan 06 08:54:02 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s
Jan 06 08:54:02 crc kubenswrapper[4784]: >
Jan 06 08:54:08 crc kubenswrapper[4784]: I0106 08:54:08.318204 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"
Jan 06 08:54:08 crc kubenswrapper[4784]: E0106 08:54:08.319198 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 08:54:11 crc kubenswrapper[4784]: I0106 08:54:11.713296 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:54:11 crc kubenswrapper[4784]: I0106 08:54:11.787610 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fxscp"
Jan 06 08:54:11 crc kubenswrapper[4784]: I0106 08:54:11.961040 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"]
Jan 06 08:54:13 crc kubenswrapper[4784]: I0106 08:54:13.258238 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fxscp" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="registry-server" containerID="cri-o://8b05bc1f13c8684d84a204ea94f2b3e2e35a777973fd01629dedaa28889ae5a2" gracePeriod=2
Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.268024 4784 generic.go:334] "Generic (PLEG): container finished" podID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerID="8b05bc1f13c8684d84a204ea94f2b3e2e35a777973fd01629dedaa28889ae5a2" exitCode=0
Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.268254 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerDied","Data":"8b05bc1f13c8684d84a204ea94f2b3e2e35a777973fd01629dedaa28889ae5a2"}
Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.268305 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxscp" event={"ID":"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8","Type":"ContainerDied","Data":"34cc880050479aecfca75d97fd0ededaadcc0caafac3238c2e945f0c5e5a737c"}
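[The Startup probe output above, `timeout: failed to connect service ":50051" within 1s`, is the signature of a gRPC health check run against the registry-server port before it is listening. A sketch of an equivalent check in Go, assuming the standard grpc.health.v1 service; the address and 1s deadline are taken from the log, the rest is illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// The whole check must finish within 1s, matching the probe's timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock()) // fail within the deadline if nothing is listening
	if err != nil {
		fmt.Println(`timeout: failed to connect service ":50051" within 1s`)
		return
	}
	defer conn.Close()

	// Empty Service name asks about the server's overall health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
		fmt.Println("not serving yet")
		return
	}
	fmt.Println("SERVING")
}
]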
pod's containers" containerID="34cc880050479aecfca75d97fd0ededaadcc0caafac3238c2e945f0c5e5a737c" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.314009 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxscp" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.398664 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities\") pod \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.398864 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wzmm\" (UniqueName: \"kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm\") pod \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.398986 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content\") pod \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\" (UID: \"a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8\") " Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.399434 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities" (OuterVolumeSpecName: "utilities") pod "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" (UID: "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.399840 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.408206 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm" (OuterVolumeSpecName: "kube-api-access-6wzmm") pod "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" (UID: "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8"). InnerVolumeSpecName "kube-api-access-6wzmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.500941 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wzmm\" (UniqueName: \"kubernetes.io/projected/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-kube-api-access-6wzmm\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.523558 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" (UID: "a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:54:14 crc kubenswrapper[4784]: I0106 08:54:14.602696 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:15 crc kubenswrapper[4784]: I0106 08:54:15.274741 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxscp" Jan 06 08:54:15 crc kubenswrapper[4784]: I0106 08:54:15.308675 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"] Jan 06 08:54:15 crc kubenswrapper[4784]: I0106 08:54:15.319331 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fxscp"] Jan 06 08:54:16 crc kubenswrapper[4784]: I0106 08:54:16.326586 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" path="/var/lib/kubelet/pods/a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8/volumes" Jan 06 08:54:20 crc kubenswrapper[4784]: I0106 08:54:20.311732 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:54:20 crc kubenswrapper[4784]: E0106 08:54:20.312250 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:54:33 crc kubenswrapper[4784]: I0106 08:54:33.312757 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:54:33 crc kubenswrapper[4784]: E0106 08:54:33.313768 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.029487 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:41 crc kubenswrapper[4784]: E0106 08:54:41.029942 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="extract-utilities" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.029959 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="extract-utilities" Jan 06 08:54:41 crc kubenswrapper[4784]: E0106 08:54:41.029987 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="extract-content" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.029994 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="extract-content" Jan 06 08:54:41 crc kubenswrapper[4784]: E0106 08:54:41.030021 4784 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="registry-server" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.030027 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="registry-server" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.030148 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2ec2db7-54d2-4a1c-8ac2-1175b8b651e8" containerName="registry-server" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.031587 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.061217 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.221424 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.221565 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxxb7\" (UniqueName: \"kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.221636 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.322785 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.323006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxxb7\" (UniqueName: \"kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.323052 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.323415 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content\") pod 
\"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.323519 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.346854 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxxb7\" (UniqueName: \"kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7\") pod \"certified-operators-5mj2b\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.361792 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:41 crc kubenswrapper[4784]: I0106 08:54:41.881761 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:42 crc kubenswrapper[4784]: I0106 08:54:42.534637 4784 generic.go:334] "Generic (PLEG): container finished" podID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerID="33f1427e9e438415e21bb43003f8813c6f71dd5791df97824fe2e09be1f7123b" exitCode=0 Jan 06 08:54:42 crc kubenswrapper[4784]: I0106 08:54:42.534863 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerDied","Data":"33f1427e9e438415e21bb43003f8813c6f71dd5791df97824fe2e09be1f7123b"} Jan 06 08:54:42 crc kubenswrapper[4784]: I0106 08:54:42.535144 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerStarted","Data":"452787c301a9a4edcc9111db967455041cf522bd5ec0e85af6e3476758d30f0d"} Jan 06 08:54:44 crc kubenswrapper[4784]: I0106 08:54:44.552463 4784 generic.go:334] "Generic (PLEG): container finished" podID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerID="a50917111c6c2ae93046bb31d64838c0238aa21d70b3340d416df0e6a396f4dd" exitCode=0 Jan 06 08:54:44 crc kubenswrapper[4784]: I0106 08:54:44.552598 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerDied","Data":"a50917111c6c2ae93046bb31d64838c0238aa21d70b3340d416df0e6a396f4dd"} Jan 06 08:54:46 crc kubenswrapper[4784]: I0106 08:54:46.312387 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:54:46 crc kubenswrapper[4784]: E0106 08:54:46.313462 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:54:46 crc kubenswrapper[4784]: I0106 08:54:46.575861 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerStarted","Data":"8544e8c2ecebda0f8d8c0c7afc63bdde391c6f203d290ca5bde36048705b79ed"} Jan 06 08:54:46 crc kubenswrapper[4784]: I0106 08:54:46.608127 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5mj2b" podStartSLOduration=3.397819358 podStartE2EDuration="6.608103035s" podCreationTimestamp="2026-01-06 08:54:40 +0000 UTC" firstStartedPulling="2026-01-06 08:54:42.537172426 +0000 UTC m=+2384.583345273" lastFinishedPulling="2026-01-06 08:54:45.747456063 +0000 UTC m=+2387.793628950" observedRunningTime="2026-01-06 08:54:46.603794852 +0000 UTC m=+2388.649967739" watchObservedRunningTime="2026-01-06 08:54:46.608103035 +0000 UTC m=+2388.654275882" Jan 06 08:54:51 crc kubenswrapper[4784]: I0106 08:54:51.362416 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:51 crc kubenswrapper[4784]: I0106 08:54:51.362861 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:51 crc kubenswrapper[4784]: I0106 08:54:51.446828 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:51 crc kubenswrapper[4784]: I0106 08:54:51.692189 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:51 crc kubenswrapper[4784]: I0106 08:54:51.760837 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:53 crc kubenswrapper[4784]: I0106 08:54:53.640093 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5mj2b" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="registry-server" containerID="cri-o://8544e8c2ecebda0f8d8c0c7afc63bdde391c6f203d290ca5bde36048705b79ed" gracePeriod=2 Jan 06 08:54:54 crc kubenswrapper[4784]: I0106 08:54:54.651852 4784 generic.go:334] "Generic (PLEG): container finished" podID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerID="8544e8c2ecebda0f8d8c0c7afc63bdde391c6f203d290ca5bde36048705b79ed" exitCode=0 Jan 06 08:54:54 crc kubenswrapper[4784]: I0106 08:54:54.651908 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerDied","Data":"8544e8c2ecebda0f8d8c0c7afc63bdde391c6f203d290ca5bde36048705b79ed"} Jan 06 08:54:54 crc kubenswrapper[4784]: I0106 08:54:54.916826 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.093992 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxxb7\" (UniqueName: \"kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7\") pod \"4a597840-28a7-4f26-aa5c-f1be2207a64c\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.094486 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content\") pod \"4a597840-28a7-4f26-aa5c-f1be2207a64c\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.094822 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities\") pod \"4a597840-28a7-4f26-aa5c-f1be2207a64c\" (UID: \"4a597840-28a7-4f26-aa5c-f1be2207a64c\") " Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.095831 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities" (OuterVolumeSpecName: "utilities") pod "4a597840-28a7-4f26-aa5c-f1be2207a64c" (UID: "4a597840-28a7-4f26-aa5c-f1be2207a64c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.110221 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7" (OuterVolumeSpecName: "kube-api-access-pxxb7") pod "4a597840-28a7-4f26-aa5c-f1be2207a64c" (UID: "4a597840-28a7-4f26-aa5c-f1be2207a64c"). InnerVolumeSpecName "kube-api-access-pxxb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.172682 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a597840-28a7-4f26-aa5c-f1be2207a64c" (UID: "4a597840-28a7-4f26-aa5c-f1be2207a64c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.196457 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.196493 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxxb7\" (UniqueName: \"kubernetes.io/projected/4a597840-28a7-4f26-aa5c-f1be2207a64c-kube-api-access-pxxb7\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.196507 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a597840-28a7-4f26-aa5c-f1be2207a64c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.662993 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mj2b" event={"ID":"4a597840-28a7-4f26-aa5c-f1be2207a64c","Type":"ContainerDied","Data":"452787c301a9a4edcc9111db967455041cf522bd5ec0e85af6e3476758d30f0d"} Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.663063 4784 scope.go:117] "RemoveContainer" containerID="8544e8c2ecebda0f8d8c0c7afc63bdde391c6f203d290ca5bde36048705b79ed" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.663292 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mj2b" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.690204 4784 scope.go:117] "RemoveContainer" containerID="a50917111c6c2ae93046bb31d64838c0238aa21d70b3340d416df0e6a396f4dd" Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.713592 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.718193 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5mj2b"] Jan 06 08:54:55 crc kubenswrapper[4784]: I0106 08:54:55.720206 4784 scope.go:117] "RemoveContainer" containerID="33f1427e9e438415e21bb43003f8813c6f71dd5791df97824fe2e09be1f7123b" Jan 06 08:54:56 crc kubenswrapper[4784]: I0106 08:54:56.329056 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" path="/var/lib/kubelet/pods/4a597840-28a7-4f26-aa5c-f1be2207a64c/volumes" Jan 06 08:55:01 crc kubenswrapper[4784]: I0106 08:55:01.312603 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:55:01 crc kubenswrapper[4784]: E0106 08:55:01.313374 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:55:12 crc kubenswrapper[4784]: I0106 08:55:12.312739 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:55:12 crc kubenswrapper[4784]: E0106 08:55:12.313727 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:55:26 crc kubenswrapper[4784]: I0106 08:55:26.312446 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:55:26 crc kubenswrapper[4784]: E0106 08:55:26.313433 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:55:41 crc kubenswrapper[4784]: I0106 08:55:41.312690 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:55:41 crc kubenswrapper[4784]: E0106 08:55:41.313448 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:55:53 crc kubenswrapper[4784]: I0106 08:55:53.312655 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:55:53 crc kubenswrapper[4784]: E0106 08:55:53.313199 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:56:05 crc kubenswrapper[4784]: I0106 08:56:05.312159 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:56:05 crc kubenswrapper[4784]: E0106 08:56:05.313476 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:56:18 crc kubenswrapper[4784]: I0106 08:56:18.319643 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:56:18 crc kubenswrapper[4784]: E0106 08:56:18.320763 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:56:30 crc kubenswrapper[4784]: I0106 08:56:30.312820 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:56:30 crc kubenswrapper[4784]: E0106 08:56:30.313771 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:56:45 crc kubenswrapper[4784]: I0106 08:56:45.312773 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:56:45 crc kubenswrapper[4784]: E0106 08:56:45.314095 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:56:59 crc kubenswrapper[4784]: I0106 08:56:59.313221 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:56:59 crc kubenswrapper[4784]: E0106 08:56:59.314436 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:57:10 crc kubenswrapper[4784]: I0106 08:57:10.313496 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:57:10 crc kubenswrapper[4784]: E0106 08:57:10.315233 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:57:21 crc kubenswrapper[4784]: I0106 08:57:21.312719 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:57:21 crc kubenswrapper[4784]: E0106 08:57:21.313880 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:57:33 crc kubenswrapper[4784]: I0106 08:57:33.312320 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:57:33 crc kubenswrapper[4784]: E0106 08:57:33.313199 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:57:44 crc kubenswrapper[4784]: I0106 08:57:44.312173 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:57:44 crc kubenswrapper[4784]: E0106 08:57:44.312803 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:57:56 crc kubenswrapper[4784]: I0106 08:57:56.313198 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:57:56 crc kubenswrapper[4784]: E0106 08:57:56.314009 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:58:09 crc kubenswrapper[4784]: I0106 08:58:09.312015 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:58:09 crc kubenswrapper[4784]: E0106 08:58:09.312929 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 08:58:22 crc kubenswrapper[4784]: I0106 08:58:22.313019 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef" Jan 06 08:58:22 crc kubenswrapper[4784]: I0106 08:58:22.560981 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d"} Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.160178 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l"] Jan 06 09:00:00 crc kubenswrapper[4784]: E0106 09:00:00.164279 4784 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="extract-content" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.164306 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="extract-content" Jan 06 09:00:00 crc kubenswrapper[4784]: E0106 09:00:00.164337 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="registry-server" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.164349 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="registry-server" Jan 06 09:00:00 crc kubenswrapper[4784]: E0106 09:00:00.164387 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="extract-utilities" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.164400 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="extract-utilities" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.164664 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a597840-28a7-4f26-aa5c-f1be2207a64c" containerName="registry-server" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.165467 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.167222 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.168660 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.170650 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l"] Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.319127 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.319249 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98j46\" (UniqueName: \"kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.319396 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.420793 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.420901 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98j46\" (UniqueName: \"kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.421001 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.422016 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.432029 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.437390 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98j46\" (UniqueName: \"kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46\") pod \"collect-profiles-29461500-k5j6l\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.496339 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:00 crc kubenswrapper[4784]: I0106 09:00:00.971169 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l"] Jan 06 09:00:01 crc kubenswrapper[4784]: I0106 09:00:01.469478 4784 generic.go:334] "Generic (PLEG): container finished" podID="ca13c898-752c-4644-b976-ca654199d139" containerID="68927c9761c11ee126ab423e136b363ffc3052cada487e5f84574ba9649cc7cd" exitCode=0 Jan 06 09:00:01 crc kubenswrapper[4784]: I0106 09:00:01.469526 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" event={"ID":"ca13c898-752c-4644-b976-ca654199d139","Type":"ContainerDied","Data":"68927c9761c11ee126ab423e136b363ffc3052cada487e5f84574ba9649cc7cd"} Jan 06 09:00:01 crc kubenswrapper[4784]: I0106 09:00:01.469578 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" event={"ID":"ca13c898-752c-4644-b976-ca654199d139","Type":"ContainerStarted","Data":"aa31c8a46d096390c07331fe5c3aa873a1a3b4f9ceb462449e324d9d6be7d69e"} Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.797217 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.857357 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume\") pod \"ca13c898-752c-4644-b976-ca654199d139\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.857621 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98j46\" (UniqueName: \"kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46\") pod \"ca13c898-752c-4644-b976-ca654199d139\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.857689 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume\") pod \"ca13c898-752c-4644-b976-ca654199d139\" (UID: \"ca13c898-752c-4644-b976-ca654199d139\") " Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.858504 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume" (OuterVolumeSpecName: "config-volume") pod "ca13c898-752c-4644-b976-ca654199d139" (UID: "ca13c898-752c-4644-b976-ca654199d139"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.863061 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ca13c898-752c-4644-b976-ca654199d139" (UID: "ca13c898-752c-4644-b976-ca654199d139"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.864437 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46" (OuterVolumeSpecName: "kube-api-access-98j46") pod "ca13c898-752c-4644-b976-ca654199d139" (UID: "ca13c898-752c-4644-b976-ca654199d139"). InnerVolumeSpecName "kube-api-access-98j46". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.959154 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98j46\" (UniqueName: \"kubernetes.io/projected/ca13c898-752c-4644-b976-ca654199d139-kube-api-access-98j46\") on node \"crc\" DevicePath \"\"" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.959203 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca13c898-752c-4644-b976-ca654199d139-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:00:02 crc kubenswrapper[4784]: I0106 09:00:02.959220 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca13c898-752c-4644-b976-ca654199d139-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:00:03 crc kubenswrapper[4784]: I0106 09:00:03.488662 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l" event={"ID":"ca13c898-752c-4644-b976-ca654199d139","Type":"ContainerDied","Data":"aa31c8a46d096390c07331fe5c3aa873a1a3b4f9ceb462449e324d9d6be7d69e"} Jan 06 09:00:03 crc kubenswrapper[4784]: I0106 09:00:03.488701 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa31c8a46d096390c07331fe5c3aa873a1a3b4f9ceb462449e324d9d6be7d69e" Jan 06 09:00:03 crc kubenswrapper[4784]: I0106 09:00:03.488776 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 09:00:03 crc kubenswrapper[4784]: I0106 09:00:03.891907 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"]
Jan 06 09:00:03 crc kubenswrapper[4784]: I0106 09:00:03.898931 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461455-dcj5m"]
Jan 06 09:00:04 crc kubenswrapper[4784]: I0106 09:00:04.321630 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d0f11e4-cf5f-414f-ab5c-71c303b6774c" path="/var/lib/kubelet/pods/8d0f11e4-cf5f-414f-ab5c-71c303b6774c/volumes"
Jan 06 09:00:15 crc kubenswrapper[4784]: I0106 09:00:15.844803 4784 scope.go:117] "RemoveContainer" containerID="8b05bc1f13c8684d84a204ea94f2b3e2e35a777973fd01629dedaa28889ae5a2"
Jan 06 09:00:15 crc kubenswrapper[4784]: I0106 09:00:15.882867 4784 scope.go:117] "RemoveContainer" containerID="899cda17276079630624cd1538ddc73bdf2b5c344d788f36b118ea6ebafad9e9"
Jan 06 09:00:15 crc kubenswrapper[4784]: I0106 09:00:15.931355 4784 scope.go:117] "RemoveContainer" containerID="1556086ccc612aa564611b6d7d8d5465665e52c9bc252f1bb534290d45974d57"
Jan 06 09:00:15 crc kubenswrapper[4784]: I0106 09:00:15.955637 4784 scope.go:117] "RemoveContainer" containerID="d3d8540aded578b58aa58fbdf29905af4299955518a98a4b414dacf85856725b"
Jan 06 09:00:44 crc kubenswrapper[4784]: I0106 09:00:44.351608 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:00:44 crc kubenswrapper[4784]: I0106 09:00:44.352225 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:01:14 crc kubenswrapper[4784]: I0106 09:01:14.351638 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:01:14 crc kubenswrapper[4784]: I0106 09:01:14.352410 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.351535 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.352435 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.352507 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.354818 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.354943 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d" gracePeriod=600
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.518080 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d" exitCode=0
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.518125 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d"}
Jan 06 09:01:44 crc kubenswrapper[4784]: I0106 09:01:44.518161 4784 scope.go:117] "RemoveContainer" containerID="fdb4ef8cea73f21514d7da8e8069f9a6a48f03d4d79d511d4d5d8a00d13accef"
Jan 06 09:01:45 crc kubenswrapper[4784]: I0106 09:01:45.534600 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"}
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.069624 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kmfsm"]
Jan 06 09:01:51 crc kubenswrapper[4784]: E0106 09:01:51.070529 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca13c898-752c-4644-b976-ca654199d139" containerName="collect-profiles"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.070561 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca13c898-752c-4644-b976-ca654199d139" containerName="collect-profiles"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.070698 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca13c898-752c-4644-b976-ca654199d139" containerName="collect-profiles"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.071707 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.096011 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kmfsm"]
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.258345 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.258394 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.258434 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.360313 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.360390 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.360472 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.360911 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.361044 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.392781 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm"
"MountVolume.SetUp succeeded for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") pod \"community-operators-kmfsm\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.394694 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:01:51 crc kubenswrapper[4784]: I0106 09:01:51.951915 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kmfsm"] Jan 06 09:01:51 crc kubenswrapper[4784]: W0106 09:01:51.956525 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda71b80b3_48e4_49ea_bec6_57fcafc5c232.slice/crio-8e62ab96cd8b7c488abb2abc6ff8cf5f2b82bf2e8b5226b8790dbb0156db0b60 WatchSource:0}: Error finding container 8e62ab96cd8b7c488abb2abc6ff8cf5f2b82bf2e8b5226b8790dbb0156db0b60: Status 404 returned error can't find the container with id 8e62ab96cd8b7c488abb2abc6ff8cf5f2b82bf2e8b5226b8790dbb0156db0b60 Jan 06 09:01:52 crc kubenswrapper[4784]: I0106 09:01:52.593468 4784 generic.go:334] "Generic (PLEG): container finished" podID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerID="a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9" exitCode=0 Jan 06 09:01:52 crc kubenswrapper[4784]: I0106 09:01:52.593579 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerDied","Data":"a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9"} Jan 06 09:01:52 crc kubenswrapper[4784]: I0106 09:01:52.593875 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerStarted","Data":"8e62ab96cd8b7c488abb2abc6ff8cf5f2b82bf2e8b5226b8790dbb0156db0b60"} Jan 06 09:01:52 crc kubenswrapper[4784]: I0106 09:01:52.595634 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 09:01:54 crc kubenswrapper[4784]: I0106 09:01:54.625655 4784 generic.go:334] "Generic (PLEG): container finished" podID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerID="1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a" exitCode=0 Jan 06 09:01:54 crc kubenswrapper[4784]: I0106 09:01:54.625733 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerDied","Data":"1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a"} Jan 06 09:01:55 crc kubenswrapper[4784]: I0106 09:01:55.642921 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerStarted","Data":"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"} Jan 06 09:01:55 crc kubenswrapper[4784]: I0106 09:01:55.661868 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kmfsm" podStartSLOduration=2.147807164 podStartE2EDuration="4.66184574s" podCreationTimestamp="2026-01-06 09:01:51 +0000 UTC" firstStartedPulling="2026-01-06 09:01:52.59534546 +0000 UTC 
m=+2814.641518297" lastFinishedPulling="2026-01-06 09:01:55.109384036 +0000 UTC m=+2817.155556873" observedRunningTime="2026-01-06 09:01:55.660594791 +0000 UTC m=+2817.706767628" watchObservedRunningTime="2026-01-06 09:01:55.66184574 +0000 UTC m=+2817.708018577" Jan 06 09:02:01 crc kubenswrapper[4784]: I0106 09:02:01.395993 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:02:01 crc kubenswrapper[4784]: I0106 09:02:01.398036 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:02:01 crc kubenswrapper[4784]: I0106 09:02:01.455040 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:02:01 crc kubenswrapper[4784]: I0106 09:02:01.754888 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:02:01 crc kubenswrapper[4784]: I0106 09:02:01.813887 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kmfsm"] Jan 06 09:02:03 crc kubenswrapper[4784]: I0106 09:02:03.710001 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kmfsm" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="registry-server" containerID="cri-o://8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594" gracePeriod=2 Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.143513 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kmfsm" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.284285 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content\") pod \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.284365 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities\") pod \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.286043 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities" (OuterVolumeSpecName: "utilities") pod "a71b80b3-48e4-49ea-bec6-57fcafc5c232" (UID: "a71b80b3-48e4-49ea-bec6-57fcafc5c232"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.286165 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") pod \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\" (UID: \"a71b80b3-48e4-49ea-bec6-57fcafc5c232\") " Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.287908 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.291953 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g" (OuterVolumeSpecName: "kube-api-access-szq8g") pod "a71b80b3-48e4-49ea-bec6-57fcafc5c232" (UID: "a71b80b3-48e4-49ea-bec6-57fcafc5c232"). InnerVolumeSpecName "kube-api-access-szq8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.372827 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a71b80b3-48e4-49ea-bec6-57fcafc5c232" (UID: "a71b80b3-48e4-49ea-bec6-57fcafc5c232"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.389763 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szq8g\" (UniqueName: \"kubernetes.io/projected/a71b80b3-48e4-49ea-bec6-57fcafc5c232-kube-api-access-szq8g\") on node \"crc\" DevicePath \"\"" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.389808 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a71b80b3-48e4-49ea-bec6-57fcafc5c232-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.719105 4784 generic.go:334] "Generic (PLEG): container finished" podID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerID="8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594" exitCode=0 Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.719164 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerDied","Data":"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"} Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.719190 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kmfsm" event={"ID":"a71b80b3-48e4-49ea-bec6-57fcafc5c232","Type":"ContainerDied","Data":"8e62ab96cd8b7c488abb2abc6ff8cf5f2b82bf2e8b5226b8790dbb0156db0b60"} Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.719192 4784 util.go:48] "No ready sandbox for pod can be found. 
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.719205 4784 scope.go:117] "RemoveContainer" containerID="8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.746703 4784 scope.go:117] "RemoveContainer" containerID="1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.753528 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kmfsm"]
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.767588 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kmfsm"]
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.770606 4784 scope.go:117] "RemoveContainer" containerID="a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.814076 4784 scope.go:117] "RemoveContainer" containerID="8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"
Jan 06 09:02:04 crc kubenswrapper[4784]: E0106 09:02:04.815750 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594\": container with ID starting with 8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594 not found: ID does not exist" containerID="8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.815788 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594"} err="failed to get container status \"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594\": rpc error: code = NotFound desc = could not find container \"8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594\": container with ID starting with 8be7f830a4888b2695c0c64b80ad15d254edb7a831417c9ffc5defeed8ad9594 not found: ID does not exist"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.815810 4784 scope.go:117] "RemoveContainer" containerID="1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a"
Jan 06 09:02:04 crc kubenswrapper[4784]: E0106 09:02:04.816114 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a\": container with ID starting with 1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a not found: ID does not exist" containerID="1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.816142 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a"} err="failed to get container status \"1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a\": rpc error: code = NotFound desc = could not find container \"1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a\": container with ID starting with 1739e404cef69527c5858ee1c81207ed1d6dbb63697a391763e9662fcb12587a not found: ID does not exist"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.816163 4784 scope.go:117] "RemoveContainer" containerID="a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9"
Jan 06 09:02:04 crc kubenswrapper[4784]: E0106 09:02:04.816369 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9\": container with ID starting with a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9 not found: ID does not exist" containerID="a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9"
Jan 06 09:02:04 crc kubenswrapper[4784]: I0106 09:02:04.816399 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9"} err="failed to get container status \"a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9\": rpc error: code = NotFound desc = could not find container \"a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9\": container with ID starting with a086998b2c80e612ddf7d5a7b018d9492629c8ae6337cbb4d10d87c375962ba9 not found: ID does not exist"
Jan 06 09:02:06 crc kubenswrapper[4784]: I0106 09:02:06.322733 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" path="/var/lib/kubelet/pods/a71b80b3-48e4-49ea-bec6-57fcafc5c232/volumes"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.985682 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:27 crc kubenswrapper[4784]: E0106 09:02:27.986676 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="registry-server"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.986695 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="registry-server"
Jan 06 09:02:27 crc kubenswrapper[4784]: E0106 09:02:27.986719 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="extract-utilities"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.986726 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="extract-utilities"
Jan 06 09:02:27 crc kubenswrapper[4784]: E0106 09:02:27.986748 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="extract-content"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.986757 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="extract-content"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.986921 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="a71b80b3-48e4-49ea-bec6-57fcafc5c232" containerName="registry-server"
Jan 06 09:02:27 crc kubenswrapper[4784]: I0106 09:02:27.987986 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.003497 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.084695 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.084735 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.084800 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsjj6\" (UniqueName: \"kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.186151 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.186199 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.186265 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsjj6\" (UniqueName: \"kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.187137 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.187182 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.207350 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsjj6\" (UniqueName: \"kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6\") pod \"redhat-marketplace-2b7ck\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") " pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.321758 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:28 crc kubenswrapper[4784]: I0106 09:02:28.568183 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:29 crc kubenswrapper[4784]: I0106 09:02:29.112613 4784 generic.go:334] "Generic (PLEG): container finished" podID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerID="b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13" exitCode=0
Jan 06 09:02:29 crc kubenswrapper[4784]: I0106 09:02:29.112745 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerDied","Data":"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13"}
Jan 06 09:02:29 crc kubenswrapper[4784]: I0106 09:02:29.112947 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerStarted","Data":"1f579d41d5e52bd990c682519cca90c5393c50052ad735ba5c439349d8e152d5"}
Jan 06 09:02:31 crc kubenswrapper[4784]: I0106 09:02:31.155454 4784 generic.go:334] "Generic (PLEG): container finished" podID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerID="a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9" exitCode=0
Jan 06 09:02:31 crc kubenswrapper[4784]: I0106 09:02:31.155561 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerDied","Data":"a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9"}
Jan 06 09:02:32 crc kubenswrapper[4784]: I0106 09:02:32.169703 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerStarted","Data":"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"}
Jan 06 09:02:32 crc kubenswrapper[4784]: I0106 09:02:32.206917 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2b7ck" podStartSLOduration=2.494299588 podStartE2EDuration="5.206888464s" podCreationTimestamp="2026-01-06 09:02:27 +0000 UTC" firstStartedPulling="2026-01-06 09:02:29.114614641 +0000 UTC m=+2851.160787498" lastFinishedPulling="2026-01-06 09:02:31.827203537 +0000 UTC m=+2853.873376374" observedRunningTime="2026-01-06 09:02:32.19745416 +0000 UTC m=+2854.243627067" watchObservedRunningTime="2026-01-06 09:02:32.206888464 +0000 UTC m=+2854.253061341"
Jan 06 09:02:38 crc kubenswrapper[4784]: I0106 09:02:38.329913 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:38 crc kubenswrapper[4784]: I0106 09:02:38.330834 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:38 crc kubenswrapper[4784]: I0106 09:02:38.394768 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:39 crc kubenswrapper[4784]: I0106 09:02:39.297682 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:39 crc kubenswrapper[4784]: I0106 09:02:39.350593 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:41 crc kubenswrapper[4784]: I0106 09:02:41.244839 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2b7ck" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="registry-server" containerID="cri-o://c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5" gracePeriod=2
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.815000 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.893090 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content\") pod \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") "
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.893130 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsjj6\" (UniqueName: \"kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6\") pod \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") "
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.893154 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities\") pod \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\" (UID: \"92cfc554-a73d-4f1d-b74b-7852b7f6c52c\") "
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.894228 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities" (OuterVolumeSpecName: "utilities") pod "92cfc554-a73d-4f1d-b74b-7852b7f6c52c" (UID: "92cfc554-a73d-4f1d-b74b-7852b7f6c52c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.902732 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6" (OuterVolumeSpecName: "kube-api-access-wsjj6") pod "92cfc554-a73d-4f1d-b74b-7852b7f6c52c" (UID: "92cfc554-a73d-4f1d-b74b-7852b7f6c52c"). InnerVolumeSpecName "kube-api-access-wsjj6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.915035 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92cfc554-a73d-4f1d-b74b-7852b7f6c52c" (UID: "92cfc554-a73d-4f1d-b74b-7852b7f6c52c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.994372 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.994426 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsjj6\" (UniqueName: \"kubernetes.io/projected/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-kube-api-access-wsjj6\") on node \"crc\" DevicePath \"\""
Jan 06 09:02:42 crc kubenswrapper[4784]: I0106 09:02:42.994446 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92cfc554-a73d-4f1d-b74b-7852b7f6c52c-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.263698 4784 generic.go:334] "Generic (PLEG): container finished" podID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerID="c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5" exitCode=0
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.263767 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerDied","Data":"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"}
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.263816 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2b7ck" event={"ID":"92cfc554-a73d-4f1d-b74b-7852b7f6c52c","Type":"ContainerDied","Data":"1f579d41d5e52bd990c682519cca90c5393c50052ad735ba5c439349d8e152d5"}
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.263862 4784 scope.go:117] "RemoveContainer" containerID="c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.263837 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2b7ck"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.299663 4784 scope.go:117] "RemoveContainer" containerID="a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.324795 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.334247 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2b7ck"]
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.358157 4784 scope.go:117] "RemoveContainer" containerID="b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.379986 4784 scope.go:117] "RemoveContainer" containerID="c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"
Jan 06 09:02:43 crc kubenswrapper[4784]: E0106 09:02:43.380499 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5\": container with ID starting with c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5 not found: ID does not exist" containerID="c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.380581 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5"} err="failed to get container status \"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5\": rpc error: code = NotFound desc = could not find container \"c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5\": container with ID starting with c0b6d71ac0a38ee6e4ed74535a98825feec51f41668244f4ad812178c1f71fc5 not found: ID does not exist"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.380609 4784 scope.go:117] "RemoveContainer" containerID="a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9"
Jan 06 09:02:43 crc kubenswrapper[4784]: E0106 09:02:43.381035 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9\": container with ID starting with a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9 not found: ID does not exist" containerID="a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.381095 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9"} err="failed to get container status \"a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9\": rpc error: code = NotFound desc = could not find container \"a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9\": container with ID starting with a25cca787e5a997b9cb4a232b66b1e6bb09c17e8566594784c67b5146e3cf6d9 not found: ID does not exist"
Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.381115 4784 scope.go:117] "RemoveContainer" containerID="b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13"
Jan 06 09:02:43 crc kubenswrapper[4784]: E0106 09:02:43.381467 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13\": container with ID starting with b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13 not found: ID does not exist" containerID="b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13"
failed" err="rpc error: code = NotFound desc = could not find container \"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13\": container with ID starting with b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13 not found: ID does not exist" containerID="b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13" Jan 06 09:02:43 crc kubenswrapper[4784]: I0106 09:02:43.381518 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13"} err="failed to get container status \"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13\": rpc error: code = NotFound desc = could not find container \"b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13\": container with ID starting with b4b1a8628d9ac294e5defe9a2c7627c9de498b621f82450095caa6b11c934a13 not found: ID does not exist" Jan 06 09:02:44 crc kubenswrapper[4784]: I0106 09:02:44.322245 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" path="/var/lib/kubelet/pods/92cfc554-a73d-4f1d-b74b-7852b7f6c52c/volumes" Jan 06 09:03:44 crc kubenswrapper[4784]: I0106 09:03:44.351606 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:03:44 crc kubenswrapper[4784]: I0106 09:03:44.352238 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:04:14 crc kubenswrapper[4784]: I0106 09:04:14.351516 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:04:14 crc kubenswrapper[4784]: I0106 09:04:14.352384 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.209736 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"] Jan 06 09:04:42 crc kubenswrapper[4784]: E0106 09:04:42.212916 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="extract-content" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.213094 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="extract-content" Jan 06 09:04:42 crc kubenswrapper[4784]: E0106 09:04:42.213258 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="registry-server" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.213441 4784 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="registry-server" Jan 06 09:04:42 crc kubenswrapper[4784]: E0106 09:04:42.213685 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="extract-utilities" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.213822 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="extract-utilities" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.214202 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="92cfc554-a73d-4f1d-b74b-7852b7f6c52c" containerName="registry-server" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.216900 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.241222 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"] Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.302572 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.302627 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.302657 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvb5z\" (UniqueName: \"kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.404473 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvb5z\" (UniqueName: \"kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.404703 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.404740 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf" Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 
09:04:42.405213 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.406090 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.442525 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvb5z\" (UniqueName: \"kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z\") pod \"certified-operators-fn8kf\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") " pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:42 crc kubenswrapper[4784]: I0106 09:04:42.569121 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:43 crc kubenswrapper[4784]: I0106 09:04:43.058797 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"]
Jan 06 09:04:43 crc kubenswrapper[4784]: I0106 09:04:43.355343 4784 generic.go:334] "Generic (PLEG): container finished" podID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerID="86e9cc720ae9365c7360d932fbbab305561f3435a013b59a08e7bcc2464317c6" exitCode=0
Jan 06 09:04:43 crc kubenswrapper[4784]: I0106 09:04:43.355408 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerDied","Data":"86e9cc720ae9365c7360d932fbbab305561f3435a013b59a08e7bcc2464317c6"}
Jan 06 09:04:43 crc kubenswrapper[4784]: I0106 09:04:43.355449 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerStarted","Data":"37bce420960d958bbfd257fb66b91e9c2bbea4f6dcef3feb0b78c0d94ce62228"}
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.351593 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.352153 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.352223 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.353099 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.353175 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285" gracePeriod=600
Jan 06 09:04:44 crc kubenswrapper[4784]: I0106 09:04:44.365763 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerStarted","Data":"2db06f79366a707895273af8ff0a6612730c77ff5d915aa3cbf6c89fe7d4f609"}
Jan 06 09:04:44 crc kubenswrapper[4784]: E0106 09:04:44.482523 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.377503 4784 generic.go:334] "Generic (PLEG): container finished" podID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerID="2db06f79366a707895273af8ff0a6612730c77ff5d915aa3cbf6c89fe7d4f609" exitCode=0
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.377633 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerDied","Data":"2db06f79366a707895273af8ff0a6612730c77ff5d915aa3cbf6c89fe7d4f609"}
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.383619 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285" exitCode=0
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.383672 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"}
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.383723 4784 scope.go:117] "RemoveContainer" containerID="bfa3f3f1328d6c0a43fe66b88172a904bd4cd12dead5c921ec7afe80da34e82d"
Jan 06 09:04:45 crc kubenswrapper[4784]: I0106 09:04:45.384476 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:04:45 crc kubenswrapper[4784]: E0106 09:04:45.384986 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:04:47 crc kubenswrapper[4784]: I0106 09:04:47.404849 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerStarted","Data":"0635bb18b9c8fc7d07cf6751b55e563701fe51eac1a55f52d157499a70477987"}
Jan 06 09:04:47 crc kubenswrapper[4784]: I0106 09:04:47.429934 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fn8kf" podStartSLOduration=2.486301472 podStartE2EDuration="5.429915377s" podCreationTimestamp="2026-01-06 09:04:42 +0000 UTC" firstStartedPulling="2026-01-06 09:04:43.35812609 +0000 UTC m=+2985.404298967" lastFinishedPulling="2026-01-06 09:04:46.301739915 +0000 UTC m=+2988.347912872" observedRunningTime="2026-01-06 09:04:47.422827517 +0000 UTC m=+2989.469000354" watchObservedRunningTime="2026-01-06 09:04:47.429915377 +0000 UTC m=+2989.476088214"
Jan 06 09:04:52 crc kubenswrapper[4784]: I0106 09:04:52.570045 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:52 crc kubenswrapper[4784]: I0106 09:04:52.570634 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:52 crc kubenswrapper[4784]: I0106 09:04:52.620430 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:53 crc kubenswrapper[4784]: I0106 09:04:53.502242 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:53 crc kubenswrapper[4784]: I0106 09:04:53.557460 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"]
Jan 06 09:04:55 crc kubenswrapper[4784]: I0106 09:04:55.469423 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fn8kf" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="registry-server" containerID="cri-o://0635bb18b9c8fc7d07cf6751b55e563701fe51eac1a55f52d157499a70477987" gracePeriod=2
Jan 06 09:04:56 crc kubenswrapper[4784]: I0106 09:04:56.484363 4784 generic.go:334] "Generic (PLEG): container finished" podID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerID="0635bb18b9c8fc7d07cf6751b55e563701fe51eac1a55f52d157499a70477987" exitCode=0
Jan 06 09:04:56 crc kubenswrapper[4784]: I0106 09:04:56.484479 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerDied","Data":"0635bb18b9c8fc7d07cf6751b55e563701fe51eac1a55f52d157499a70477987"}
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.052225 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.132877 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvb5z\" (UniqueName: \"kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z\") pod \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") "
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.133630 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content\") pod \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") "
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.133690 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities\") pod \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\" (UID: \"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502\") "
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.134762 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities" (OuterVolumeSpecName: "utilities") pod "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" (UID: "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.142756 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z" (OuterVolumeSpecName: "kube-api-access-bvb5z") pod "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" (UID: "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502"). InnerVolumeSpecName "kube-api-access-bvb5z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.203493 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" (UID: "5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.235951 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvb5z\" (UniqueName: \"kubernetes.io/projected/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-kube-api-access-bvb5z\") on node \"crc\" DevicePath \"\""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.236527 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.237130 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.498479 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fn8kf" event={"ID":"5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502","Type":"ContainerDied","Data":"37bce420960d958bbfd257fb66b91e9c2bbea4f6dcef3feb0b78c0d94ce62228"}
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.498571 4784 scope.go:117] "RemoveContainer" containerID="0635bb18b9c8fc7d07cf6751b55e563701fe51eac1a55f52d157499a70477987"
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.498595 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fn8kf"
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.528820 4784 scope.go:117] "RemoveContainer" containerID="2db06f79366a707895273af8ff0a6612730c77ff5d915aa3cbf6c89fe7d4f609"
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.541666 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"]
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.546777 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fn8kf"]
Jan 06 09:04:57 crc kubenswrapper[4784]: I0106 09:04:57.578235 4784 scope.go:117] "RemoveContainer" containerID="86e9cc720ae9365c7360d932fbbab305561f3435a013b59a08e7bcc2464317c6"
Jan 06 09:04:57 crc kubenswrapper[4784]: E0106 09:04:57.636926 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c8c1ce1_bbe9_4f3c_9788_a85dfe60b502.slice/crio-37bce420960d958bbfd257fb66b91e9c2bbea4f6dcef3feb0b78c0d94ce62228\": RecentStats: unable to find data in memory cache]"
Jan 06 09:04:58 crc kubenswrapper[4784]: I0106 09:04:58.328758 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" path="/var/lib/kubelet/pods/5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502/volumes"
Jan 06 09:04:59 crc kubenswrapper[4784]: I0106 09:04:59.313008 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:04:59 crc kubenswrapper[4784]: E0106 09:04:59.313718 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:05:13 crc kubenswrapper[4784]: I0106 09:05:13.312835 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:05:13 crc kubenswrapper[4784]: E0106 09:05:13.313752 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:05:27 crc kubenswrapper[4784]: I0106 09:05:27.312519 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:05:27 crc kubenswrapper[4784]: E0106 09:05:27.313152 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:05:39 crc kubenswrapper[4784]: I0106 09:05:39.312843 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:05:39 crc kubenswrapper[4784]: E0106 09:05:39.314343 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:05:51 crc kubenswrapper[4784]: I0106 09:05:51.312187 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:05:51 crc kubenswrapper[4784]: E0106 09:05:51.313022 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.968577 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:05:52 crc kubenswrapper[4784]: E0106 09:05:52.969126 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="extract-utilities"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.969137 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="extract-utilities"
Jan 06 09:05:52 crc kubenswrapper[4784]: E0106 09:05:52.969159 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="registry-server"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.969166 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="registry-server"
Jan 06 09:05:52 crc kubenswrapper[4784]: E0106 09:05:52.969181 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="extract-content"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.969187 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="extract-content"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.969321 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c8c1ce1-bbe9-4f3c-9788-a85dfe60b502" containerName="registry-server"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.971177 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:52 crc kubenswrapper[4784]: I0106 09:05:52.987446 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.060458 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.060603 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.060687 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp54q\" (UniqueName: \"kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.163072 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.163145 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp54q\" (UniqueName: \"kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.163217 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.163772 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.164055 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.183147 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp54q\" (UniqueName: \"kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q\") pod \"redhat-operators-jvknf\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") " pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.300308 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.782019 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.993033 4784 generic.go:334] "Generic (PLEG): container finished" podID="9193f535-4f9a-4d2f-9359-706862c60108" containerID="66af3600faa104352e2b4eecaeb67e52db35af767b3a2f62cd82c01cd2eb7b5b" exitCode=0
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.993077 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerDied","Data":"66af3600faa104352e2b4eecaeb67e52db35af767b3a2f62cd82c01cd2eb7b5b"}
Jan 06 09:05:53 crc kubenswrapper[4784]: I0106 09:05:53.993101 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerStarted","Data":"64f830ecb645502221f92683594ac6d9134d6239212b32dad447a3ccc16004df"}
Jan 06 09:05:55 crc kubenswrapper[4784]: I0106 09:05:55.003671 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerStarted","Data":"ed09f3b5c5a565125d95d44f0c3e899875253c4224567d29e1a2d69a9d975755"}
Jan 06 09:05:56 crc kubenswrapper[4784]: I0106 09:05:56.015716 4784 generic.go:334] "Generic (PLEG): container finished" podID="9193f535-4f9a-4d2f-9359-706862c60108" containerID="ed09f3b5c5a565125d95d44f0c3e899875253c4224567d29e1a2d69a9d975755" exitCode=0
Jan 06 09:05:56 crc kubenswrapper[4784]: I0106 09:05:56.015764 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerDied","Data":"ed09f3b5c5a565125d95d44f0c3e899875253c4224567d29e1a2d69a9d975755"}
Jan 06 09:05:57 crc kubenswrapper[4784]: I0106 09:05:57.024994 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerStarted","Data":"cc69f08f4d9552eba762c704fde556e0c68505c8c52107c2412b044bb96e6a00"}
Jan 06 09:06:03 crc kubenswrapper[4784]: I0106 09:06:03.301868 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:03 crc kubenswrapper[4784]: I0106 09:06:03.302675 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:03 crc kubenswrapper[4784]: I0106 09:06:03.379441 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:03 crc kubenswrapper[4784]: I0106 09:06:03.407724 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jvknf" podStartSLOduration=8.864160603 podStartE2EDuration="11.407704596s" podCreationTimestamp="2026-01-06 09:05:52 +0000 UTC" firstStartedPulling="2026-01-06 09:05:53.99467287 +0000 UTC m=+3056.040845707" lastFinishedPulling="2026-01-06 09:05:56.538216823 +0000 UTC m=+3058.584389700" observedRunningTime="2026-01-06 09:05:57.047402931 +0000 UTC m=+3059.093575778" watchObservedRunningTime="2026-01-06 09:06:03.407704596 +0000 UTC m=+3065.453877443"
Jan 06 09:06:04 crc kubenswrapper[4784]: I0106 09:06:04.146431 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:04 crc kubenswrapper[4784]: I0106 09:06:04.218755 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:06:05 crc kubenswrapper[4784]: I0106 09:06:05.312739 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:06:05 crc kubenswrapper[4784]: E0106 09:06:05.313066 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:06:06 crc kubenswrapper[4784]: I0106 09:06:06.096461 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jvknf" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="registry-server" containerID="cri-o://cc69f08f4d9552eba762c704fde556e0c68505c8c52107c2412b044bb96e6a00" gracePeriod=2
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.136080 4784 generic.go:334] "Generic (PLEG): container finished" podID="9193f535-4f9a-4d2f-9359-706862c60108" containerID="cc69f08f4d9552eba762c704fde556e0c68505c8c52107c2412b044bb96e6a00" exitCode=0
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.136147 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerDied","Data":"cc69f08f4d9552eba762c704fde556e0c68505c8c52107c2412b044bb96e6a00"}
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.222655 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.414753 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vp54q\" (UniqueName: \"kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q\") pod \"9193f535-4f9a-4d2f-9359-706862c60108\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") "
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.414838 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities\") pod \"9193f535-4f9a-4d2f-9359-706862c60108\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") "
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.414942 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content\") pod \"9193f535-4f9a-4d2f-9359-706862c60108\" (UID: \"9193f535-4f9a-4d2f-9359-706862c60108\") "
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.416394 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities" (OuterVolumeSpecName: "utilities") pod "9193f535-4f9a-4d2f-9359-706862c60108" (UID: "9193f535-4f9a-4d2f-9359-706862c60108"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.425891 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q" (OuterVolumeSpecName: "kube-api-access-vp54q") pod "9193f535-4f9a-4d2f-9359-706862c60108" (UID: "9193f535-4f9a-4d2f-9359-706862c60108"). InnerVolumeSpecName "kube-api-access-vp54q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.516915 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vp54q\" (UniqueName: \"kubernetes.io/projected/9193f535-4f9a-4d2f-9359-706862c60108-kube-api-access-vp54q\") on node \"crc\" DevicePath \"\""
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.517505 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.604756 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9193f535-4f9a-4d2f-9359-706862c60108" (UID: "9193f535-4f9a-4d2f-9359-706862c60108"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:06:09 crc kubenswrapper[4784]: I0106 09:06:09.618909 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9193f535-4f9a-4d2f-9359-706862c60108-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.144885 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jvknf" event={"ID":"9193f535-4f9a-4d2f-9359-706862c60108","Type":"ContainerDied","Data":"64f830ecb645502221f92683594ac6d9134d6239212b32dad447a3ccc16004df"}
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.144957 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jvknf"
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.144960 4784 scope.go:117] "RemoveContainer" containerID="cc69f08f4d9552eba762c704fde556e0c68505c8c52107c2412b044bb96e6a00"
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.165891 4784 scope.go:117] "RemoveContainer" containerID="ed09f3b5c5a565125d95d44f0c3e899875253c4224567d29e1a2d69a9d975755"
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.181421 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.188866 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jvknf"]
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.210992 4784 scope.go:117] "RemoveContainer" containerID="66af3600faa104352e2b4eecaeb67e52db35af767b3a2f62cd82c01cd2eb7b5b"
Jan 06 09:06:10 crc kubenswrapper[4784]: I0106 09:06:10.321814 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9193f535-4f9a-4d2f-9359-706862c60108" path="/var/lib/kubelet/pods/9193f535-4f9a-4d2f-9359-706862c60108/volumes"
Jan 06 09:06:18 crc kubenswrapper[4784]: I0106 09:06:18.316620 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:06:18 crc kubenswrapper[4784]: E0106 09:06:18.317842 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:06:29 crc kubenswrapper[4784]: I0106 09:06:29.312029 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:06:29 crc kubenswrapper[4784]: E0106 09:06:29.313061 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:06:42 crc kubenswrapper[4784]: I0106 09:06:42.312739 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:06:42 crc kubenswrapper[4784]: E0106 09:06:42.314803 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:06:55 crc kubenswrapper[4784]: I0106 09:06:55.312532 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:06:55 crc kubenswrapper[4784]: E0106 09:06:55.313458 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:07:07 crc kubenswrapper[4784]: I0106 09:07:07.318516 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:07:07 crc kubenswrapper[4784]: E0106 09:07:07.319269 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:07:22 crc kubenswrapper[4784]: I0106 09:07:22.313380 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:07:22 crc kubenswrapper[4784]: E0106 09:07:22.314311 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:07:34 crc kubenswrapper[4784]: I0106 09:07:34.313044 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:07:34 crc kubenswrapper[4784]: E0106 09:07:34.314133 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:07:45 crc kubenswrapper[4784]: I0106 09:07:45.312512 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:07:45 crc kubenswrapper[4784]: E0106 09:07:45.313644 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:07:58 crc kubenswrapper[4784]: I0106 09:07:58.319382 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:07:58 crc kubenswrapper[4784]: E0106 09:07:58.320625 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:08:14 crc kubenswrapper[4784]: I0106 09:08:14.312524 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:08:14 crc kubenswrapper[4784]: E0106 09:08:14.313582 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:08:26 crc kubenswrapper[4784]: I0106 09:08:26.312872 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:08:26 crc kubenswrapper[4784]: E0106 09:08:26.313855 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:08:41 crc kubenswrapper[4784]: I0106 09:08:41.314685 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:08:41 crc kubenswrapper[4784]: E0106 09:08:41.315716 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:08:53 crc kubenswrapper[4784]: I0106 09:08:53.313222 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:08:53 crc kubenswrapper[4784]: E0106 09:08:53.314201 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:09:06 crc kubenswrapper[4784]: I0106 09:09:06.313297 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:09:06 crc kubenswrapper[4784]: E0106 09:09:06.314304 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:09:18 crc kubenswrapper[4784]: I0106 09:09:18.316775 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:09:18 crc kubenswrapper[4784]: E0106 09:09:18.317579 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:09:32 crc kubenswrapper[4784]: I0106 09:09:32.312585 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:09:32 crc kubenswrapper[4784]: E0106 09:09:32.315134 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:09:43 crc kubenswrapper[4784]: I0106 09:09:43.313235 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:09:43 crc kubenswrapper[4784]: E0106 09:09:43.315491 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:09:56 crc kubenswrapper[4784]: I0106 09:09:56.313202 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:09:57 crc kubenswrapper[4784]: I0106 09:09:57.321939 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e"}
Jan 06 09:12:14 crc kubenswrapper[4784]: I0106 09:12:14.350935 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:12:14 crc kubenswrapper[4784]: I0106 09:12:14.351622 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.705983 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:42 crc kubenswrapper[4784]: E0106 09:12:42.707009 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="extract-content"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.707031 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="extract-content"
Jan 06 09:12:42 crc kubenswrapper[4784]: E0106 09:12:42.707050 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="extract-utilities"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.707063 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="extract-utilities"
Jan 06 09:12:42 crc kubenswrapper[4784]: E0106 09:12:42.707114 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="registry-server"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.707128 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="registry-server"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.707297 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="9193f535-4f9a-4d2f-9359-706862c60108" containerName="registry-server"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.708715 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.739688 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.749086 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.749147 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9756q\" (UniqueName: \"kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.749329 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.851643 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.851707 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9756q\" (UniqueName: \"kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.851772 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.852974 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.852990 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:42 crc kubenswrapper[4784]: I0106 09:12:42.879385 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9756q\" (UniqueName: \"kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q\") pod \"redhat-marketplace-pfnt9\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") " pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.046747 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.497980 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.862313 4784 generic.go:334] "Generic (PLEG): container finished" podID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerID="5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4" exitCode=0
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.862374 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerDied","Data":"5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4"}
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.862412 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerStarted","Data":"2f9e7f66464bd6cb1e5115637d53da42083c9326a796065f4d9cede71fcad350"}
Jan 06 09:12:43 crc kubenswrapper[4784]: I0106 09:12:43.864629 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 06 09:12:44 crc kubenswrapper[4784]: I0106 09:12:44.351669 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:12:44 crc kubenswrapper[4784]: I0106 09:12:44.351763 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:12:44 crc kubenswrapper[4784]: I0106 09:12:44.875921 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerStarted","Data":"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"}
Jan 06 09:12:45 crc kubenswrapper[4784]: I0106 09:12:45.888646 4784 generic.go:334] "Generic (PLEG): container finished" podID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerID="9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0" exitCode=0
Jan 06 09:12:45 crc kubenswrapper[4784]: I0106 09:12:45.888724 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerDied","Data":"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"}
Jan 06 09:12:46 crc kubenswrapper[4784]: I0106 09:12:46.902182 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerStarted","Data":"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"}
Jan 06 09:12:46 crc kubenswrapper[4784]: I0106 09:12:46.935757 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pfnt9" podStartSLOduration=2.369536991 podStartE2EDuration="4.935736291s" podCreationTimestamp="2026-01-06 09:12:42 +0000 UTC" firstStartedPulling="2026-01-06 09:12:43.864230384 +0000 UTC m=+3465.910403251" lastFinishedPulling="2026-01-06 09:12:46.430429714 +0000 UTC m=+3468.476602551" observedRunningTime="2026-01-06 09:12:46.928828046 +0000 UTC m=+3468.975000923" watchObservedRunningTime="2026-01-06 09:12:46.935736291 +0000 UTC m=+3468.981909148"
Jan 06 09:12:53 crc kubenswrapper[4784]: I0106 09:12:53.047501 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:53 crc kubenswrapper[4784]: I0106 09:12:53.048175 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:53 crc kubenswrapper[4784]: I0106 09:12:53.115070 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:54 crc kubenswrapper[4784]: I0106 09:12:54.039696 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:54 crc kubenswrapper[4784]: I0106 09:12:54.114039 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:56 crc kubenswrapper[4784]: I0106 09:12:55.999607 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pfnt9" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="registry-server" containerID="cri-o://994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e" gracePeriod=2
Jan 06 09:12:56 crc kubenswrapper[4784]: I0106 09:12:56.978140 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.020023 4784 generic.go:334] "Generic (PLEG): container finished" podID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerID="994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e" exitCode=0
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.020092 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerDied","Data":"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"}
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.020110 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfnt9"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.020147 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfnt9" event={"ID":"ed977cc0-b1e9-4c53-b660-846ec8f7df28","Type":"ContainerDied","Data":"2f9e7f66464bd6cb1e5115637d53da42083c9326a796065f4d9cede71fcad350"}
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.020186 4784 scope.go:117] "RemoveContainer" containerID="994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.040522 4784 scope.go:117] "RemoveContainer" containerID="9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.055699 4784 scope.go:117] "RemoveContainer" containerID="5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.080030 4784 scope.go:117] "RemoveContainer" containerID="994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"
Jan 06 09:12:57 crc kubenswrapper[4784]: E0106 09:12:57.080705 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e\": container with ID starting with 994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e not found: ID does not exist" containerID="994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.080747 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e"} err="failed to get container status \"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e\": rpc error: code = NotFound desc = could not find container \"994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e\": container with ID starting with 994fd6c1db2bda0dda9dd777e9255e744faf1d2707cbe92d6f6bc91faf8aef0e not found: ID does not exist"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.080774 4784 scope.go:117] "RemoveContainer" containerID="9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"
Jan 06 09:12:57 crc kubenswrapper[4784]: E0106 09:12:57.081327 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0\": container with ID starting with 9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0 not found: ID does not exist" containerID="9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.081391 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0"} err="failed to get container status \"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0\": rpc error: code = NotFound desc = could not find container \"9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0\": container with ID starting with 9ef60d8e4684f081ad96d39d4da3ebd22afce5fcee64a9ca08b3c9ca41ffb8a0 not found: ID does not exist"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.081426 4784 scope.go:117] "RemoveContainer" containerID="5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4"
Jan 06 09:12:57 crc kubenswrapper[4784]: E0106 09:12:57.081864 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4\": container with ID starting with 5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4 not found: ID does not exist" containerID="5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.081885 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4"} err="failed to get container status \"5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4\": rpc error: code = NotFound desc = could not find container \"5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4\": container with ID starting with 5a54245355d6eeb2cf6123f56567a9befaf0ee975b21eb3881600668f8b264e4 not found: ID does not exist"
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.170387 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content\") pod \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") "
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.170582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities\") pod \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") "
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.170637 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9756q\" (UniqueName: \"kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q\") pod \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\" (UID: \"ed977cc0-b1e9-4c53-b660-846ec8f7df28\") "
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.172636 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities" (OuterVolumeSpecName: "utilities") pod "ed977cc0-b1e9-4c53-b660-846ec8f7df28" (UID: "ed977cc0-b1e9-4c53-b660-846ec8f7df28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.179754 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q" (OuterVolumeSpecName: "kube-api-access-9756q") pod "ed977cc0-b1e9-4c53-b660-846ec8f7df28" (UID: "ed977cc0-b1e9-4c53-b660-846ec8f7df28"). InnerVolumeSpecName "kube-api-access-9756q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.198344 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed977cc0-b1e9-4c53-b660-846ec8f7df28" (UID: "ed977cc0-b1e9-4c53-b660-846ec8f7df28"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.272127 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.272163 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9756q\" (UniqueName: \"kubernetes.io/projected/ed977cc0-b1e9-4c53-b660-846ec8f7df28-kube-api-access-9756q\") on node \"crc\" DevicePath \"\""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.272175 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed977cc0-b1e9-4c53-b660-846ec8f7df28-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.370402 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:57 crc kubenswrapper[4784]: I0106 09:12:57.375846 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfnt9"]
Jan 06 09:12:58 crc kubenswrapper[4784]: I0106 09:12:58.322933 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" path="/var/lib/kubelet/pods/ed977cc0-b1e9-4c53-b660-846ec8f7df28/volumes"
Jan 06 09:13:14 crc kubenswrapper[4784]: I0106 09:13:14.351537 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:13:14 crc kubenswrapper[4784]: I0106 09:13:14.352090 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:13:14 crc kubenswrapper[4784]: I0106 09:13:14.352151 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 09:13:14 crc kubenswrapper[4784]: I0106 09:13:14.352708 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 09:13:14 crc kubenswrapper[4784]: I0106 09:13:14.352761 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e" gracePeriod=600
Jan 06 09:13:15 crc kubenswrapper[4784]: I0106 09:13:15.173082 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e" exitCode=0
Jan 06 09:13:15 crc kubenswrapper[4784]: I0106 09:13:15.173158 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e"}
Jan 06 09:13:15 crc kubenswrapper[4784]: I0106 09:13:15.174087 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b"}
Jan 06 09:13:15 crc kubenswrapper[4784]: I0106 09:13:15.174120 4784 scope.go:117] "RemoveContainer" containerID="133085feddcf4f1eb2ad4c3ae2dc0e678af81990901cc145522e7bcddda13285"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.167025 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j"]
Jan 06 09:15:00 crc kubenswrapper[4784]: E0106 09:15:00.168984 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="registry-server"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.169009 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="registry-server"
Jan 06 09:15:00 crc kubenswrapper[4784]: E0106 09:15:00.169022 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="extract-utilities"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.169032 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="extract-utilities"
Jan 06 09:15:00 crc kubenswrapper[4784]: E0106 09:15:00.169056 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="extract-content"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.169065 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="extract-content"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.169262 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed977cc0-b1e9-4c53-b660-846ec8f7df28" containerName="registry-server"
Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.170005 4784 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.174465 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.174479 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.183316 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j"] Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.298745 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9tnj\" (UniqueName: \"kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.299282 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.299610 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.401083 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.401175 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9tnj\" (UniqueName: \"kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.401205 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.403521 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume\") pod 
\"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.413441 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.419935 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9tnj\" (UniqueName: \"kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj\") pod \"collect-profiles-29461515-ckn9j\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.492761 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:00 crc kubenswrapper[4784]: I0106 09:15:00.958538 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j"] Jan 06 09:15:01 crc kubenswrapper[4784]: I0106 09:15:01.098740 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" event={"ID":"83b5cb3b-c6b0-4597-8178-0386178664c5","Type":"ContainerStarted","Data":"4c2a6b6b2f4ab399311cd24c47bbf5939866f39356805b20a2afb59c31b718d1"} Jan 06 09:15:02 crc kubenswrapper[4784]: I0106 09:15:02.110057 4784 generic.go:334] "Generic (PLEG): container finished" podID="83b5cb3b-c6b0-4597-8178-0386178664c5" containerID="ea002e2d17fe3f1aeb9185b5f6ab265956ea09312b91ca6577fe6d3d81a31927" exitCode=0 Jan 06 09:15:02 crc kubenswrapper[4784]: I0106 09:15:02.110121 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" event={"ID":"83b5cb3b-c6b0-4597-8178-0386178664c5","Type":"ContainerDied","Data":"ea002e2d17fe3f1aeb9185b5f6ab265956ea09312b91ca6577fe6d3d81a31927"} Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.397962 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.544772 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume\") pod \"83b5cb3b-c6b0-4597-8178-0386178664c5\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.544832 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume\") pod \"83b5cb3b-c6b0-4597-8178-0386178664c5\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.544886 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9tnj\" (UniqueName: \"kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj\") pod \"83b5cb3b-c6b0-4597-8178-0386178664c5\" (UID: \"83b5cb3b-c6b0-4597-8178-0386178664c5\") " Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.545356 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume" (OuterVolumeSpecName: "config-volume") pod "83b5cb3b-c6b0-4597-8178-0386178664c5" (UID: "83b5cb3b-c6b0-4597-8178-0386178664c5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.552052 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "83b5cb3b-c6b0-4597-8178-0386178664c5" (UID: "83b5cb3b-c6b0-4597-8178-0386178664c5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.553419 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj" (OuterVolumeSpecName: "kube-api-access-n9tnj") pod "83b5cb3b-c6b0-4597-8178-0386178664c5" (UID: "83b5cb3b-c6b0-4597-8178-0386178664c5"). InnerVolumeSpecName "kube-api-access-n9tnj". 
Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.646204 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83b5cb3b-c6b0-4597-8178-0386178664c5-config-volume\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.646244 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/83b5cb3b-c6b0-4597-8178-0386178664c5-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:03 crc kubenswrapper[4784]: I0106 09:15:03.646257 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9tnj\" (UniqueName: \"kubernetes.io/projected/83b5cb3b-c6b0-4597-8178-0386178664c5-kube-api-access-n9tnj\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:04 crc kubenswrapper[4784]: I0106 09:15:04.128440 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j" event={"ID":"83b5cb3b-c6b0-4597-8178-0386178664c5","Type":"ContainerDied","Data":"4c2a6b6b2f4ab399311cd24c47bbf5939866f39356805b20a2afb59c31b718d1"}
Jan 06 09:15:04 crc kubenswrapper[4784]: I0106 09:15:04.128486 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c2a6b6b2f4ab399311cd24c47bbf5939866f39356805b20a2afb59c31b718d1"
Jan 06 09:15:04 crc kubenswrapper[4784]: I0106 09:15:04.128530 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461515-ckn9j"
Jan 06 09:15:04 crc kubenswrapper[4784]: I0106 09:15:04.501343 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6"]
Jan 06 09:15:04 crc kubenswrapper[4784]: I0106 09:15:04.512241 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461470-h2zw6"]
Jan 06 09:15:06 crc kubenswrapper[4784]: I0106 09:15:06.328458 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbbcb33d-d688-44bb-bc55-791e7269c3fa" path="/var/lib/kubelet/pods/fbbcb33d-d688-44bb-bc55-791e7269c3fa/volumes"
Jan 06 09:15:15 crc kubenswrapper[4784]: I0106 09:15:15.057240 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:15:15 crc kubenswrapper[4784]: I0106 09:15:15.058013 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:15:16 crc kubenswrapper[4784]: I0106 09:15:16.698530 4784 scope.go:117] "RemoveContainer" containerID="1f5b712a8108b418cfdd98f97b2cfffee71948b615688dec98be80b41b3b508d"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.566144 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:36 crc kubenswrapper[4784]: E0106 09:15:36.567582 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b5cb3b-c6b0-4597-8178-0386178664c5" containerName="collect-profiles"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.567615 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b5cb3b-c6b0-4597-8178-0386178664c5" containerName="collect-profiles"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.567974 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b5cb3b-c6b0-4597-8178-0386178664c5" containerName="collect-profiles"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.570205 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.573182 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.713835 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.713890 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.713914 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs8n5\" (UniqueName: \"kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.816228 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.816302 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.816402 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs8n5\" (UniqueName: \"kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.817083 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.817453 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.854305 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs8n5\" (UniqueName: \"kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5\") pod \"certified-operators-r5qr5\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") " pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:36 crc kubenswrapper[4784]: I0106 09:15:36.905447 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:37 crc kubenswrapper[4784]: I0106 09:15:37.237207 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:37 crc kubenswrapper[4784]: I0106 09:15:37.457096 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerStarted","Data":"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"}
Jan 06 09:15:37 crc kubenswrapper[4784]: I0106 09:15:37.457153 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerStarted","Data":"27b10d4a3333ea09ff84ba2987e2797658a67cd33dac168ff0572256ad3416ef"}
Jan 06 09:15:38 crc kubenswrapper[4784]: I0106 09:15:38.466166 4784 generic.go:334] "Generic (PLEG): container finished" podID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerID="46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3" exitCode=0
Jan 06 09:15:38 crc kubenswrapper[4784]: I0106 09:15:38.466252 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerDied","Data":"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"}
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.174218 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.176063 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.183650 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.261172 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj7sj\" (UniqueName: \"kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.261471 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.261612 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.363305 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.363371 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj7sj\" (UniqueName: \"kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.363402 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.363806 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.363985 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.387956 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj7sj\" (UniqueName: \"kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj\") pod \"community-operators-qc4rb\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") " pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.475153 4784 generic.go:334] "Generic (PLEG): container finished" podID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerID="ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b" exitCode=0
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.475196 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerDied","Data":"ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b"}
Jan 06 09:15:39 crc kubenswrapper[4784]: I0106 09:15:39.541556 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.105064 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:40 crc kubenswrapper[4784]: W0106 09:15:40.111675 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod299f6b71_a2ff_444c_8ce7_95b700a4b0e7.slice/crio-b7526b16fdd350a87a5fc0e6fe7fb87b76f22967caae4e30142247984f40d7d9 WatchSource:0}: Error finding container b7526b16fdd350a87a5fc0e6fe7fb87b76f22967caae4e30142247984f40d7d9: Status 404 returned error can't find the container with id b7526b16fdd350a87a5fc0e6fe7fb87b76f22967caae4e30142247984f40d7d9
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.492776 4784 generic.go:334] "Generic (PLEG): container finished" podID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerID="6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084" exitCode=0
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.492862 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerDied","Data":"6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084"}
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.492892 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerStarted","Data":"b7526b16fdd350a87a5fc0e6fe7fb87b76f22967caae4e30142247984f40d7d9"}
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.499925 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerStarted","Data":"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"}
Jan 06 09:15:40 crc kubenswrapper[4784]: I0106 09:15:40.536484 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r5qr5" podStartSLOduration=1.9642079140000002 podStartE2EDuration="4.536460322s" podCreationTimestamp="2026-01-06 09:15:36 +0000 UTC" firstStartedPulling="2026-01-06 09:15:37.460082644 +0000 UTC m=+3639.506255491" lastFinishedPulling="2026-01-06 09:15:40.032335032 +0000 UTC m=+3642.078507899" observedRunningTime="2026-01-06 09:15:40.535109611 +0000 UTC m=+3642.581282458" watchObservedRunningTime="2026-01-06 09:15:40.536460322 +0000 UTC m=+3642.582633179"
Jan 06 09:15:42 crc kubenswrapper[4784]: I0106 09:15:42.520642 4784 generic.go:334] "Generic (PLEG): container finished" podID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerID="e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633" exitCode=0
Jan 06 09:15:42 crc kubenswrapper[4784]: I0106 09:15:42.520795 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerDied","Data":"e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633"}
Jan 06 09:15:43 crc kubenswrapper[4784]: I0106 09:15:43.534620 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerStarted","Data":"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"}
Jan 06 09:15:43 crc kubenswrapper[4784]: I0106 09:15:43.578342 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qc4rb" podStartSLOduration=2.054477325 podStartE2EDuration="4.578317057s" podCreationTimestamp="2026-01-06 09:15:39 +0000 UTC" firstStartedPulling="2026-01-06 09:15:40.498033217 +0000 UTC m=+3642.544206064" lastFinishedPulling="2026-01-06 09:15:43.021872919 +0000 UTC m=+3645.068045796" observedRunningTime="2026-01-06 09:15:43.571101602 +0000 UTC m=+3645.617274489" watchObservedRunningTime="2026-01-06 09:15:43.578317057 +0000 UTC m=+3645.624489924"
Jan 06 09:15:44 crc kubenswrapper[4784]: I0106 09:15:44.351674 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:15:44 crc kubenswrapper[4784]: I0106 09:15:44.351765 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:15:46 crc kubenswrapper[4784]: I0106 09:15:46.906542 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:46 crc kubenswrapper[4784]: I0106 09:15:46.906700 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:46 crc kubenswrapper[4784]: I0106 09:15:46.973528 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:47 crc kubenswrapper[4784]: I0106 09:15:47.643678 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:48 crc kubenswrapper[4784]: I0106 09:15:48.144716 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:49 crc kubenswrapper[4784]: I0106 09:15:49.543105 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:49 crc kubenswrapper[4784]: I0106 09:15:49.543449 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:49 crc kubenswrapper[4784]: I0106 09:15:49.620026 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:49 crc kubenswrapper[4784]: I0106 09:15:49.627023 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r5qr5" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="registry-server" containerID="cri-o://ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb" gracePeriod=2
Jan 06 09:15:49 crc kubenswrapper[4784]: I0106 09:15:49.706151 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:50 crc kubenswrapper[4784]: I0106 09:15:50.548649 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.265561 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.388716 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cs8n5\" (UniqueName: \"kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5\") pod \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") "
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.388898 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities\") pod \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") "
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.388989 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content\") pod \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\" (UID: \"912e9ee3-d780-4a1c-97d5-aaf57b58b403\") "
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.389750 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities" (OuterVolumeSpecName: "utilities") pod "912e9ee3-d780-4a1c-97d5-aaf57b58b403" (UID: "912e9ee3-d780-4a1c-97d5-aaf57b58b403"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.401727 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5" (OuterVolumeSpecName: "kube-api-access-cs8n5") pod "912e9ee3-d780-4a1c-97d5-aaf57b58b403" (UID: "912e9ee3-d780-4a1c-97d5-aaf57b58b403"). InnerVolumeSpecName "kube-api-access-cs8n5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.466302 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "912e9ee3-d780-4a1c-97d5-aaf57b58b403" (UID: "912e9ee3-d780-4a1c-97d5-aaf57b58b403"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.491536 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.492015 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cs8n5\" (UniqueName: \"kubernetes.io/projected/912e9ee3-d780-4a1c-97d5-aaf57b58b403-kube-api-access-cs8n5\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.492031 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912e9ee3-d780-4a1c-97d5-aaf57b58b403-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650105 4784 generic.go:334] "Generic (PLEG): container finished" podID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerID="ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb" exitCode=0
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650198 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerDied","Data":"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"}
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650286 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5qr5" event={"ID":"912e9ee3-d780-4a1c-97d5-aaf57b58b403","Type":"ContainerDied","Data":"27b10d4a3333ea09ff84ba2987e2797658a67cd33dac168ff0572256ad3416ef"}
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650328 4784 scope.go:117] "RemoveContainer" containerID="ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650496 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qc4rb" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="registry-server" containerID="cri-o://be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc" gracePeriod=2
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.650937 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5qr5"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.682392 4784 scope.go:117] "RemoveContainer" containerID="ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.706592 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.715671 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r5qr5"]
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.733641 4784 scope.go:117] "RemoveContainer" containerID="46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.844078 4784 scope.go:117] "RemoveContainer" containerID="ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"
Jan 06 09:15:51 crc kubenswrapper[4784]: E0106 09:15:51.846750 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb\": container with ID starting with ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb not found: ID does not exist" containerID="ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.846828 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb"} err="failed to get container status \"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb\": rpc error: code = NotFound desc = could not find container \"ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb\": container with ID starting with ea20dc853da6aec580cb5e20c5d3987938ba99e88fef5078e361e79beb9367bb not found: ID does not exist"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.846875 4784 scope.go:117] "RemoveContainer" containerID="ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b"
Jan 06 09:15:51 crc kubenswrapper[4784]: E0106 09:15:51.860506 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b\": container with ID starting with ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b not found: ID does not exist" containerID="ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.860627 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b"} err="failed to get container status \"ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b\": rpc error: code = NotFound desc = could not find container \"ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b\": container with ID starting with ff9ce414cd72ba95bfc8de0d7bcdecd0660c9f5b8cff6954e5cb0fee5ab6593b not found: ID does not exist"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.860672 4784 scope.go:117] "RemoveContainer" containerID="46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"
Jan 06 09:15:51 crc kubenswrapper[4784]: E0106 09:15:51.861785 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3\": container with ID starting with 46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3 not found: ID does not exist" containerID="46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"
Jan 06 09:15:51 crc kubenswrapper[4784]: I0106 09:15:51.861849 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3"} err="failed to get container status \"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3\": rpc error: code = NotFound desc = could not find container \"46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3\": container with ID starting with 46d0b090862b04da3de882848c0db98cf1b128537107d0afdf48dd5de5654fc3 not found: ID does not exist"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.135419 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.304411 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content\") pod \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") "
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.304678 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj7sj\" (UniqueName: \"kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj\") pod \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") "
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.304781 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities\") pod \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\" (UID: \"299f6b71-a2ff-444c-8ce7-95b700a4b0e7\") "
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.306025 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities" (OuterVolumeSpecName: "utilities") pod "299f6b71-a2ff-444c-8ce7-95b700a4b0e7" (UID: "299f6b71-a2ff-444c-8ce7-95b700a4b0e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.309256 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj" (OuterVolumeSpecName: "kube-api-access-qj7sj") pod "299f6b71-a2ff-444c-8ce7-95b700a4b0e7" (UID: "299f6b71-a2ff-444c-8ce7-95b700a4b0e7"). InnerVolumeSpecName "kube-api-access-qj7sj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.329868 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" path="/var/lib/kubelet/pods/912e9ee3-d780-4a1c-97d5-aaf57b58b403/volumes"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.390669 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "299f6b71-a2ff-444c-8ce7-95b700a4b0e7" (UID: "299f6b71-a2ff-444c-8ce7-95b700a4b0e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.407144 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj7sj\" (UniqueName: \"kubernetes.io/projected/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-kube-api-access-qj7sj\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.407190 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.407207 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/299f6b71-a2ff-444c-8ce7-95b700a4b0e7-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.665311 4784 generic.go:334] "Generic (PLEG): container finished" podID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerID="be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc" exitCode=0
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.665416 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qc4rb"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.665436 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerDied","Data":"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"}
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.665504 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qc4rb" event={"ID":"299f6b71-a2ff-444c-8ce7-95b700a4b0e7","Type":"ContainerDied","Data":"b7526b16fdd350a87a5fc0e6fe7fb87b76f22967caae4e30142247984f40d7d9"}
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.665536 4784 scope.go:117] "RemoveContainer" containerID="be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.701459 4784 scope.go:117] "RemoveContainer" containerID="e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.738724 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.751751 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qc4rb"]
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.760688 4784 scope.go:117] "RemoveContainer" containerID="6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.788676 4784 scope.go:117] "RemoveContainer" containerID="be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"
Jan 06 09:15:52 crc kubenswrapper[4784]: E0106 09:15:52.789485 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc\": container with ID starting with be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc not found: ID does not exist" containerID="be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.789576 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc"} err="failed to get container status \"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc\": rpc error: code = NotFound desc = could not find container \"be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc\": container with ID starting with be23b162b9af5c3a78a9de7fe7fe6614a93f55078a31d895425acb191a3ed3dc not found: ID does not exist"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.789629 4784 scope.go:117] "RemoveContainer" containerID="e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633"
Jan 06 09:15:52 crc kubenswrapper[4784]: E0106 09:15:52.790213 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633\": container with ID starting with e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633 not found: ID does not exist" containerID="e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.790270 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633"} err="failed to get container status \"e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633\": rpc error: code = NotFound desc = could not find container \"e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633\": container with ID starting with e830f71ab652a44ada92d5b9a61162e9430830ebe4c37ce5e0031300a27df633 not found: ID does not exist"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.790309 4784 scope.go:117] "RemoveContainer" containerID="6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084"
Jan 06 09:15:52 crc kubenswrapper[4784]: E0106 09:15:52.790877 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084\": container with ID starting with 6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084 not found: ID does not exist" containerID="6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084"
Jan 06 09:15:52 crc kubenswrapper[4784]: I0106 09:15:52.790925 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084"} err="failed to get container status \"6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084\": rpc error: code = NotFound desc = could not find container \"6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084\": container with ID starting with 6dd708109ad9cb4f7a630f85462415f2b6f050c0c45f2e99669e48e52a1ae084 not found: ID does not exist"
Jan 06 09:15:54 crc kubenswrapper[4784]: I0106 09:15:54.328426 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" path="/var/lib/kubelet/pods/299f6b71-a2ff-444c-8ce7-95b700a4b0e7/volumes"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.469823 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"]
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.470995 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="extract-utilities"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471018 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="extract-utilities"
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.471051 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471059 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.471073 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="extract-content"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471083 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="extract-content"
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.471093 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="extract-content"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471101 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="extract-content"
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.471114 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="extract-utilities"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471122 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="extract-utilities"
Jan 06 09:16:13 crc kubenswrapper[4784]: E0106 09:16:13.471138 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471146 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471310 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="299f6b71-a2ff-444c-8ce7-95b700a4b0e7" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.471334 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="912e9ee3-d780-4a1c-97d5-aaf57b58b403" containerName="registry-server"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.472678 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.487628 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"]
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.662439 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.662713 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jjfx\" (UniqueName: \"kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.662966 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.763860 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.763955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.763994 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jjfx\" (UniqueName: \"kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.764953 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.765350 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:13 crc kubenswrapper[4784]: I0106 09:16:13.792520 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jjfx\" (UniqueName: \"kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx\") pod \"redhat-operators-xb2tv\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.351057 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.351139 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.351199 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.352130 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.352273 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" gracePeriod=600
Jan 06 09:16:14 crc kubenswrapper[4784]: I0106 09:16:14.373099 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2tv"
Jan 06 09:16:15 crc kubenswrapper[4784]: I0106 09:16:15.240894 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"]
Jan 06 09:16:15 crc kubenswrapper[4784]: W0106 09:16:15.244763 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode25ea52f_203f_4cff_acd4_d60b5b2b8b0e.slice/crio-7e1050de99f3706ab0e86e31b1390626b4899d046a42a8254fc79ab8d4315c2d WatchSource:0}: Error finding container 7e1050de99f3706ab0e86e31b1390626b4899d046a42a8254fc79ab8d4315c2d: Status 404 returned error can't find the container with id 7e1050de99f3706ab0e86e31b1390626b4899d046a42a8254fc79ab8d4315c2d
Jan 06 09:16:15 crc kubenswrapper[4784]: I0106 09:16:15.415009 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" exitCode=0
Jan 06 09:16:15 crc kubenswrapper[4784]: I0106 09:16:15.415045 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b"}
Jan 06 09:16:15 crc kubenswrapper[4784]: I0106 09:16:15.415324 4784 scope.go:117] "RemoveContainer" containerID="54d6f3700abe17d895a423989a384b6dda55f512a756e39aace475d4033e847e"
Jan 06 09:16:15 crc kubenswrapper[4784]: I0106 09:16:15.419041 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerStarted","Data":"7e1050de99f3706ab0e86e31b1390626b4899d046a42a8254fc79ab8d4315c2d"}
Jan 06 09:16:15 crc kubenswrapper[4784]: E0106 09:16:15.499286 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:16:16 crc kubenswrapper[4784]: I0106 09:16:16.427412 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b"
Jan 06 09:16:16 crc kubenswrapper[4784]: E0106 09:16:16.427656 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:16:16 crc kubenswrapper[4784]: I0106 09:16:16.428130 4784 generic.go:334] "Generic (PLEG): container finished" podID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerID="da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d" exitCode=0
Jan 06 09:16:16 crc kubenswrapper[4784]: I0106 09:16:16.428166 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerDied","Data":"da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d"} Jan 06 09:16:17 crc kubenswrapper[4784]: I0106 09:16:17.436006 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerStarted","Data":"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242"} Jan 06 09:16:18 crc kubenswrapper[4784]: I0106 09:16:18.446821 4784 generic.go:334] "Generic (PLEG): container finished" podID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerID="04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242" exitCode=0 Jan 06 09:16:18 crc kubenswrapper[4784]: I0106 09:16:18.446959 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerDied","Data":"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242"} Jan 06 09:16:20 crc kubenswrapper[4784]: I0106 09:16:20.480637 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerStarted","Data":"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e"} Jan 06 09:16:20 crc kubenswrapper[4784]: I0106 09:16:20.512062 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xb2tv" podStartSLOduration=4.587926629 podStartE2EDuration="7.512042552s" podCreationTimestamp="2026-01-06 09:16:13 +0000 UTC" firstStartedPulling="2026-01-06 09:16:16.429869568 +0000 UTC m=+3678.476042405" lastFinishedPulling="2026-01-06 09:16:19.353985461 +0000 UTC m=+3681.400158328" observedRunningTime="2026-01-06 09:16:20.505415605 +0000 UTC m=+3682.551588452" watchObservedRunningTime="2026-01-06 09:16:20.512042552 +0000 UTC m=+3682.558215409" Jan 06 09:16:24 crc kubenswrapper[4784]: I0106 09:16:24.374368 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:24 crc kubenswrapper[4784]: I0106 09:16:24.375044 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:25 crc kubenswrapper[4784]: I0106 09:16:25.431153 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xb2tv" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="registry-server" probeResult="failure" output=< Jan 06 09:16:25 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 09:16:25 crc kubenswrapper[4784]: > Jan 06 09:16:30 crc kubenswrapper[4784]: I0106 09:16:30.312998 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:16:30 crc kubenswrapper[4784]: E0106 09:16:30.314358 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:16:34 crc kubenswrapper[4784]: 
I0106 09:16:34.452449 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:34 crc kubenswrapper[4784]: I0106 09:16:34.514096 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:34 crc kubenswrapper[4784]: I0106 09:16:34.697754 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"] Jan 06 09:16:35 crc kubenswrapper[4784]: I0106 09:16:35.595889 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xb2tv" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="registry-server" containerID="cri-o://b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e" gracePeriod=2 Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.192378 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.331098 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content\") pod \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.331205 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jjfx\" (UniqueName: \"kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx\") pod \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.331291 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities\") pod \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\" (UID: \"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e\") " Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.332790 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities" (OuterVolumeSpecName: "utilities") pod "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" (UID: "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.344093 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx" (OuterVolumeSpecName: "kube-api-access-2jjfx") pod "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" (UID: "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e"). InnerVolumeSpecName "kube-api-access-2jjfx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.432642 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jjfx\" (UniqueName: \"kubernetes.io/projected/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-kube-api-access-2jjfx\") on node \"crc\" DevicePath \"\"" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.432874 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.486170 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" (UID: "e25ea52f-203f-4cff-acd4-d60b5b2b8b0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.534480 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.614707 4784 generic.go:334] "Generic (PLEG): container finished" podID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerID="b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e" exitCode=0 Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.614772 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerDied","Data":"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e"} Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.614825 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2tv" event={"ID":"e25ea52f-203f-4cff-acd4-d60b5b2b8b0e","Type":"ContainerDied","Data":"7e1050de99f3706ab0e86e31b1390626b4899d046a42a8254fc79ab8d4315c2d"} Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.614837 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2tv" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.614920 4784 scope.go:117] "RemoveContainer" containerID="b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.654520 4784 scope.go:117] "RemoveContainer" containerID="04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.664461 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"] Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.669491 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xb2tv"] Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.707516 4784 scope.go:117] "RemoveContainer" containerID="da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.733030 4784 scope.go:117] "RemoveContainer" containerID="b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e" Jan 06 09:16:37 crc kubenswrapper[4784]: E0106 09:16:37.733502 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e\": container with ID starting with b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e not found: ID does not exist" containerID="b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.733569 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e"} err="failed to get container status \"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e\": rpc error: code = NotFound desc = could not find container \"b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e\": container with ID starting with b36ea9e27e777030edc0303dda7eb6c9b52153bf68ac0861b3071d2d9f31c80e not found: ID does not exist" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.733606 4784 scope.go:117] "RemoveContainer" containerID="04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242" Jan 06 09:16:37 crc kubenswrapper[4784]: E0106 09:16:37.734062 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242\": container with ID starting with 04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242 not found: ID does not exist" containerID="04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.734104 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242"} err="failed to get container status \"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242\": rpc error: code = NotFound desc = could not find container \"04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242\": container with ID starting with 04b4f17097a440ab9a95b684fee103b6e7ec924cce06ef50167daa6f23f80242 not found: ID does not exist" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.734133 4784 scope.go:117] "RemoveContainer" 
containerID="da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d" Jan 06 09:16:37 crc kubenswrapper[4784]: E0106 09:16:37.734393 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d\": container with ID starting with da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d not found: ID does not exist" containerID="da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d" Jan 06 09:16:37 crc kubenswrapper[4784]: I0106 09:16:37.734431 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d"} err="failed to get container status \"da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d\": rpc error: code = NotFound desc = could not find container \"da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d\": container with ID starting with da9bf35e994e14190f17ad77b35bdba521827e175401747c16282e379823f93d not found: ID does not exist" Jan 06 09:16:38 crc kubenswrapper[4784]: I0106 09:16:38.325463 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" path="/var/lib/kubelet/pods/e25ea52f-203f-4cff-acd4-d60b5b2b8b0e/volumes" Jan 06 09:16:43 crc kubenswrapper[4784]: I0106 09:16:43.312850 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:16:43 crc kubenswrapper[4784]: E0106 09:16:43.313949 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:16:55 crc kubenswrapper[4784]: I0106 09:16:55.313459 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:16:55 crc kubenswrapper[4784]: E0106 09:16:55.314591 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:17:07 crc kubenswrapper[4784]: I0106 09:17:07.312934 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:17:07 crc kubenswrapper[4784]: E0106 09:17:07.314964 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:17:20 crc kubenswrapper[4784]: I0106 09:17:20.312224 4784 scope.go:117] "RemoveContainer" 
containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:17:20 crc kubenswrapper[4784]: E0106 09:17:20.312974 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:17:34 crc kubenswrapper[4784]: I0106 09:17:34.313134 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:17:34 crc kubenswrapper[4784]: E0106 09:17:34.314311 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:17:45 crc kubenswrapper[4784]: I0106 09:17:45.312941 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:17:45 crc kubenswrapper[4784]: E0106 09:17:45.316255 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:17:56 crc kubenswrapper[4784]: I0106 09:17:56.313937 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:17:56 crc kubenswrapper[4784]: E0106 09:17:56.314921 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:18:10 crc kubenswrapper[4784]: I0106 09:18:10.313643 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:18:10 crc kubenswrapper[4784]: E0106 09:18:10.314986 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:18:23 crc kubenswrapper[4784]: I0106 09:18:23.312831 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:18:23 crc kubenswrapper[4784]: E0106 09:18:23.313873 4784 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:18:35 crc kubenswrapper[4784]: I0106 09:18:35.312727 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:18:35 crc kubenswrapper[4784]: E0106 09:18:35.314107 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:18:48 crc kubenswrapper[4784]: I0106 09:18:48.319285 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:18:48 crc kubenswrapper[4784]: E0106 09:18:48.320271 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:18:59 crc kubenswrapper[4784]: I0106 09:18:59.312637 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:18:59 crc kubenswrapper[4784]: E0106 09:18:59.313772 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:19:12 crc kubenswrapper[4784]: I0106 09:19:12.312855 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:19:12 crc kubenswrapper[4784]: E0106 09:19:12.313768 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:19:24 crc kubenswrapper[4784]: I0106 09:19:24.312905 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:19:24 crc kubenswrapper[4784]: E0106 09:19:24.314101 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:19:37 crc kubenswrapper[4784]: I0106 09:19:37.313176 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:19:37 crc kubenswrapper[4784]: E0106 09:19:37.314213 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:19:49 crc kubenswrapper[4784]: I0106 09:19:49.312070 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:19:49 crc kubenswrapper[4784]: E0106 09:19:49.313264 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:20:00 crc kubenswrapper[4784]: I0106 09:20:00.313063 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:20:00 crc kubenswrapper[4784]: E0106 09:20:00.314370 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:20:11 crc kubenswrapper[4784]: I0106 09:20:11.312621 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:20:11 crc kubenswrapper[4784]: E0106 09:20:11.313826 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:20:23 crc kubenswrapper[4784]: I0106 09:20:23.312265 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:20:23 crc kubenswrapper[4784]: E0106 09:20:23.313206 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:20:36 crc kubenswrapper[4784]: I0106 09:20:36.312442 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:20:36 crc kubenswrapper[4784]: E0106 09:20:36.313484 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:20:48 crc kubenswrapper[4784]: I0106 09:20:48.319692 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:20:48 crc kubenswrapper[4784]: E0106 09:20:48.320739 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:21:01 crc kubenswrapper[4784]: I0106 09:21:01.314049 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:21:01 crc kubenswrapper[4784]: E0106 09:21:01.316516 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:21:15 crc kubenswrapper[4784]: I0106 09:21:15.312513 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:21:16 crc kubenswrapper[4784]: I0106 09:21:16.252063 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07"} Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.094759 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:11 crc kubenswrapper[4784]: E0106 09:23:11.095884 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="extract-content" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.095907 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="extract-content" Jan 06 09:23:11 crc kubenswrapper[4784]: E0106 09:23:11.095958 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="registry-server" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.095970 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="registry-server" Jan 06 
09:23:11 crc kubenswrapper[4784]: E0106 09:23:11.095994 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="extract-utilities" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.096008 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="extract-utilities" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.096251 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e25ea52f-203f-4cff-acd4-d60b5b2b8b0e" containerName="registry-server" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.098120 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.102011 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.271423 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djb9v\" (UniqueName: \"kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.271582 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.271863 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.374045 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.375044 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.375058 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.375233 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djb9v\" (UniqueName: 
\"kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.375713 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.411344 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djb9v\" (UniqueName: \"kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v\") pod \"redhat-marketplace-bq42h\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.439403 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:11 crc kubenswrapper[4784]: I0106 09:23:11.755527 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:12 crc kubenswrapper[4784]: I0106 09:23:12.351641 4784 generic.go:334] "Generic (PLEG): container finished" podID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerID="ace6a42e829aa948c4f582f1d2c5fe9455cb3ab79693cb7ea67f2ced39ac652e" exitCode=0 Jan 06 09:23:12 crc kubenswrapper[4784]: I0106 09:23:12.351694 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerDied","Data":"ace6a42e829aa948c4f582f1d2c5fe9455cb3ab79693cb7ea67f2ced39ac652e"} Jan 06 09:23:12 crc kubenswrapper[4784]: I0106 09:23:12.352025 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerStarted","Data":"49f416f66cc5be671e0d9ec17d059a7d87e47dfcba2a3cf620317313a6baf65f"} Jan 06 09:23:12 crc kubenswrapper[4784]: I0106 09:23:12.354442 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 09:23:13 crc kubenswrapper[4784]: I0106 09:23:13.365460 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerStarted","Data":"b2e785c003dccb2dd8f73a57c2eadbfc16d958012d611685e31ab2468b4b078b"} Jan 06 09:23:15 crc kubenswrapper[4784]: E0106 09:23:15.346948 4784 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.035s" Jan 06 09:23:15 crc kubenswrapper[4784]: I0106 09:23:15.355173 4784 generic.go:334] "Generic (PLEG): container finished" podID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerID="b2e785c003dccb2dd8f73a57c2eadbfc16d958012d611685e31ab2468b4b078b" exitCode=0 Jan 06 09:23:15 crc kubenswrapper[4784]: I0106 09:23:15.355251 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerDied","Data":"b2e785c003dccb2dd8f73a57c2eadbfc16d958012d611685e31ab2468b4b078b"} Jan 06 09:23:17 crc kubenswrapper[4784]: 
I0106 09:23:17.391866 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerStarted","Data":"75824b62a168a0a9a0557146abbb032530835fb182078676bb9876e68afcf5f6"} Jan 06 09:23:17 crc kubenswrapper[4784]: I0106 09:23:17.431173 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bq42h" podStartSLOduration=2.491634569 podStartE2EDuration="6.431145089s" podCreationTimestamp="2026-01-06 09:23:11 +0000 UTC" firstStartedPulling="2026-01-06 09:23:12.354005431 +0000 UTC m=+4094.400178298" lastFinishedPulling="2026-01-06 09:23:16.293515981 +0000 UTC m=+4098.339688818" observedRunningTime="2026-01-06 09:23:17.419443187 +0000 UTC m=+4099.465616054" watchObservedRunningTime="2026-01-06 09:23:17.431145089 +0000 UTC m=+4099.477317966" Jan 06 09:23:21 crc kubenswrapper[4784]: I0106 09:23:21.440355 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:21 crc kubenswrapper[4784]: I0106 09:23:21.440975 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:21 crc kubenswrapper[4784]: I0106 09:23:21.511156 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:22 crc kubenswrapper[4784]: I0106 09:23:22.508814 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:22 crc kubenswrapper[4784]: I0106 09:23:22.577977 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:24 crc kubenswrapper[4784]: I0106 09:23:24.456808 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bq42h" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="registry-server" containerID="cri-o://75824b62a168a0a9a0557146abbb032530835fb182078676bb9876e68afcf5f6" gracePeriod=2 Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.469187 4784 generic.go:334] "Generic (PLEG): container finished" podID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerID="75824b62a168a0a9a0557146abbb032530835fb182078676bb9876e68afcf5f6" exitCode=0 Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.469271 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerDied","Data":"75824b62a168a0a9a0557146abbb032530835fb182078676bb9876e68afcf5f6"} Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.469635 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq42h" event={"ID":"3161e2c1-58ca-4a97-95a1-8085ca4be14c","Type":"ContainerDied","Data":"49f416f66cc5be671e0d9ec17d059a7d87e47dfcba2a3cf620317313a6baf65f"} Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.469661 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49f416f66cc5be671e0d9ec17d059a7d87e47dfcba2a3cf620317313a6baf65f" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.502145 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.521261 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djb9v\" (UniqueName: \"kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v\") pod \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.521424 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities\") pod \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.521788 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content\") pod \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\" (UID: \"3161e2c1-58ca-4a97-95a1-8085ca4be14c\") " Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.523499 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities" (OuterVolumeSpecName: "utilities") pod "3161e2c1-58ca-4a97-95a1-8085ca4be14c" (UID: "3161e2c1-58ca-4a97-95a1-8085ca4be14c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.537536 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v" (OuterVolumeSpecName: "kube-api-access-djb9v") pod "3161e2c1-58ca-4a97-95a1-8085ca4be14c" (UID: "3161e2c1-58ca-4a97-95a1-8085ca4be14c"). InnerVolumeSpecName "kube-api-access-djb9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.571327 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3161e2c1-58ca-4a97-95a1-8085ca4be14c" (UID: "3161e2c1-58ca-4a97-95a1-8085ca4be14c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.623027 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djb9v\" (UniqueName: \"kubernetes.io/projected/3161e2c1-58ca-4a97-95a1-8085ca4be14c-kube-api-access-djb9v\") on node \"crc\" DevicePath \"\"" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.623082 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:23:25 crc kubenswrapper[4784]: I0106 09:23:25.623103 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3161e2c1-58ca-4a97-95a1-8085ca4be14c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:23:26 crc kubenswrapper[4784]: I0106 09:23:26.478505 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq42h" Jan 06 09:23:26 crc kubenswrapper[4784]: I0106 09:23:26.517923 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:26 crc kubenswrapper[4784]: I0106 09:23:26.530412 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq42h"] Jan 06 09:23:28 crc kubenswrapper[4784]: I0106 09:23:28.369064 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" path="/var/lib/kubelet/pods/3161e2c1-58ca-4a97-95a1-8085ca4be14c/volumes" Jan 06 09:23:44 crc kubenswrapper[4784]: I0106 09:23:44.351462 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:23:44 crc kubenswrapper[4784]: I0106 09:23:44.352301 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:24:14 crc kubenswrapper[4784]: I0106 09:24:14.350895 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:24:14 crc kubenswrapper[4784]: I0106 09:24:14.351594 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:24:44 crc kubenswrapper[4784]: I0106 09:24:44.350877 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:24:44 crc kubenswrapper[4784]: I0106 09:24:44.351574 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:24:44 crc kubenswrapper[4784]: I0106 09:24:44.351656 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 09:24:44 crc kubenswrapper[4784]: I0106 09:24:44.352276 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" 
Jan 06 09:24:44 crc kubenswrapper[4784]: I0106 09:24:44.353516 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07" gracePeriod=600 Jan 06 09:24:45 crc kubenswrapper[4784]: I0106 09:24:45.173196 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07" exitCode=0 Jan 06 09:24:45 crc kubenswrapper[4784]: I0106 09:24:45.173245 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07"} Jan 06 09:24:45 crc kubenswrapper[4784]: I0106 09:24:45.173824 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322"} Jan 06 09:24:45 crc kubenswrapper[4784]: I0106 09:24:45.173857 4784 scope.go:117] "RemoveContainer" containerID="efccd9176214e6bbb53b29a9206f1146a13cb72605006d69bda2eebc0e379b2b" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.858648 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:12 crc kubenswrapper[4784]: E0106 09:26:12.859977 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="registry-server" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.860019 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="registry-server" Jan 06 09:26:12 crc kubenswrapper[4784]: E0106 09:26:12.860064 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="extract-utilities" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.860078 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="extract-utilities" Jan 06 09:26:12 crc kubenswrapper[4784]: E0106 09:26:12.860103 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="extract-content" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.860115 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="extract-content" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.860422 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3161e2c1-58ca-4a97-95a1-8085ca4be14c" containerName="registry-server" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.862301 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.874390 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.917653 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.917760 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:12 crc kubenswrapper[4784]: I0106 09:26:12.917822 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckr2p\" (UniqueName: \"kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.018683 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckr2p\" (UniqueName: \"kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.018800 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.018849 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.019566 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.019582 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.066460 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ckr2p\" (UniqueName: \"kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p\") pod \"community-operators-7849j\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:13 crc kubenswrapper[4784]: I0106 09:26:13.196571 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:14 crc kubenswrapper[4784]: I0106 09:26:14.073286 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:14 crc kubenswrapper[4784]: W0106 09:26:14.086767 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fb9017a_0b64_4199_adf1_9b2e9a1cb9d2.slice/crio-41495e2beab3f98b3461928fe8dfee4283362fbdedf96d907a891c1a827dc44b WatchSource:0}: Error finding container 41495e2beab3f98b3461928fe8dfee4283362fbdedf96d907a891c1a827dc44b: Status 404 returned error can't find the container with id 41495e2beab3f98b3461928fe8dfee4283362fbdedf96d907a891c1a827dc44b Jan 06 09:26:14 crc kubenswrapper[4784]: E0106 09:26:14.563084 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fb9017a_0b64_4199_adf1_9b2e9a1cb9d2.slice/crio-212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fb9017a_0b64_4199_adf1_9b2e9a1cb9d2.slice/crio-conmon-212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03.scope\": RecentStats: unable to find data in memory cache]" Jan 06 09:26:15 crc kubenswrapper[4784]: I0106 09:26:15.018892 4784 generic.go:334] "Generic (PLEG): container finished" podID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerID="212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03" exitCode=0 Jan 06 09:26:15 crc kubenswrapper[4784]: I0106 09:26:15.018979 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerDied","Data":"212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03"} Jan 06 09:26:15 crc kubenswrapper[4784]: I0106 09:26:15.019368 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerStarted","Data":"41495e2beab3f98b3461928fe8dfee4283362fbdedf96d907a891c1a827dc44b"} Jan 06 09:26:17 crc kubenswrapper[4784]: I0106 09:26:17.037286 4784 generic.go:334] "Generic (PLEG): container finished" podID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerID="1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48" exitCode=0 Jan 06 09:26:17 crc kubenswrapper[4784]: I0106 09:26:17.037691 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerDied","Data":"1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48"} Jan 06 09:26:18 crc kubenswrapper[4784]: I0106 09:26:18.052050 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" 
event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerStarted","Data":"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410"} Jan 06 09:26:23 crc kubenswrapper[4784]: I0106 09:26:23.197587 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:23 crc kubenswrapper[4784]: I0106 09:26:23.198576 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:23 crc kubenswrapper[4784]: I0106 09:26:23.282464 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:23 crc kubenswrapper[4784]: I0106 09:26:23.319453 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7849j" podStartSLOduration=8.797013753 podStartE2EDuration="11.319439035s" podCreationTimestamp="2026-01-06 09:26:12 +0000 UTC" firstStartedPulling="2026-01-06 09:26:15.021417685 +0000 UTC m=+4277.067590522" lastFinishedPulling="2026-01-06 09:26:17.543842937 +0000 UTC m=+4279.590015804" observedRunningTime="2026-01-06 09:26:18.088391849 +0000 UTC m=+4280.134564716" watchObservedRunningTime="2026-01-06 09:26:23.319439035 +0000 UTC m=+4285.365611872" Jan 06 09:26:24 crc kubenswrapper[4784]: I0106 09:26:24.180388 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:24 crc kubenswrapper[4784]: I0106 09:26:24.276852 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.120918 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7849j" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="registry-server" containerID="cri-o://f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410" gracePeriod=2 Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.623193 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.762868 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckr2p\" (UniqueName: \"kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p\") pod \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.762954 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content\") pod \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.763047 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities\") pod \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\" (UID: \"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2\") " Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.764825 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities" (OuterVolumeSpecName: "utilities") pod "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" (UID: "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.775289 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p" (OuterVolumeSpecName: "kube-api-access-ckr2p") pod "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" (UID: "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2"). InnerVolumeSpecName "kube-api-access-ckr2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.845673 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" (UID: "8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.865001 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckr2p\" (UniqueName: \"kubernetes.io/projected/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-kube-api-access-ckr2p\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.865052 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:26 crc kubenswrapper[4784]: I0106 09:26:26.865069 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.136266 4784 generic.go:334] "Generic (PLEG): container finished" podID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerID="f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410" exitCode=0 Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.136310 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerDied","Data":"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410"} Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.136334 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7849j" event={"ID":"8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2","Type":"ContainerDied","Data":"41495e2beab3f98b3461928fe8dfee4283362fbdedf96d907a891c1a827dc44b"} Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.136351 4784 scope.go:117] "RemoveContainer" containerID="f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.136369 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7849j" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.166320 4784 scope.go:117] "RemoveContainer" containerID="1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.193677 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.209969 4784 scope.go:117] "RemoveContainer" containerID="212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.211983 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7849j"] Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.231036 4784 scope.go:117] "RemoveContainer" containerID="f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410" Jan 06 09:26:27 crc kubenswrapper[4784]: E0106 09:26:27.231864 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410\": container with ID starting with f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410 not found: ID does not exist" containerID="f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.231918 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410"} err="failed to get container status \"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410\": rpc error: code = NotFound desc = could not find container \"f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410\": container with ID starting with f06319c5e6e132260a580eb8b3d97e02b9c19895b1ffb0ea441ab1479bf5a410 not found: ID does not exist" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.231952 4784 scope.go:117] "RemoveContainer" containerID="1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48" Jan 06 09:26:27 crc kubenswrapper[4784]: E0106 09:26:27.232626 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48\": container with ID starting with 1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48 not found: ID does not exist" containerID="1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.232665 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48"} err="failed to get container status \"1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48\": rpc error: code = NotFound desc = could not find container \"1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48\": container with ID starting with 1b8536380674c20fe8dafe028477b5356bd2f85a0a8d6c81e2fbc72488d31d48 not found: ID does not exist" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.232701 4784 scope.go:117] "RemoveContainer" containerID="212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03" Jan 06 09:26:27 crc kubenswrapper[4784]: E0106 09:26:27.233158 4784 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03\": container with ID starting with 212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03 not found: ID does not exist" containerID="212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03" Jan 06 09:26:27 crc kubenswrapper[4784]: I0106 09:26:27.233186 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03"} err="failed to get container status \"212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03\": rpc error: code = NotFound desc = could not find container \"212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03\": container with ID starting with 212af23e56ef1f8aca6983b82c7260c7d2ad688a111ea37b63a7349035d3aa03 not found: ID does not exist" Jan 06 09:26:28 crc kubenswrapper[4784]: I0106 09:26:28.331626 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" path="/var/lib/kubelet/pods/8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2/volumes" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.149807 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:31 crc kubenswrapper[4784]: E0106 09:26:31.150323 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="extract-utilities" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.150352 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="extract-utilities" Jan 06 09:26:31 crc kubenswrapper[4784]: E0106 09:26:31.150390 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="extract-content" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.150409 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="extract-content" Jan 06 09:26:31 crc kubenswrapper[4784]: E0106 09:26:31.150455 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="registry-server" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.150469 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="registry-server" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.150837 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fb9017a-0b64-4199-adf1-9b2e9a1cb9d2" containerName="registry-server" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.152884 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.165587 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.348427 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npdq2\" (UniqueName: \"kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.348855 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.349027 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.450473 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npdq2\" (UniqueName: \"kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.450628 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.450695 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.451259 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.451343 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.479404 4784 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-npdq2\" (UniqueName: \"kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2\") pod \"certified-operators-r6pnq\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:31 crc kubenswrapper[4784]: I0106 09:26:31.482166 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:32 crc kubenswrapper[4784]: I0106 09:26:32.049660 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:32 crc kubenswrapper[4784]: I0106 09:26:32.183022 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerStarted","Data":"548651162ee530d7ec51cf1ccc99c6288394a76ba3079e243a2e48e577797aaf"} Jan 06 09:26:33 crc kubenswrapper[4784]: I0106 09:26:33.194921 4784 generic.go:334] "Generic (PLEG): container finished" podID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerID="8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115" exitCode=0 Jan 06 09:26:33 crc kubenswrapper[4784]: I0106 09:26:33.194991 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerDied","Data":"8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115"} Jan 06 09:26:35 crc kubenswrapper[4784]: I0106 09:26:35.215183 4784 generic.go:334] "Generic (PLEG): container finished" podID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerID="900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1" exitCode=0 Jan 06 09:26:35 crc kubenswrapper[4784]: I0106 09:26:35.215253 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerDied","Data":"900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1"} Jan 06 09:26:36 crc kubenswrapper[4784]: I0106 09:26:36.229936 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerStarted","Data":"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416"} Jan 06 09:26:36 crc kubenswrapper[4784]: I0106 09:26:36.258117 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r6pnq" podStartSLOduration=2.811605071 podStartE2EDuration="5.258100809s" podCreationTimestamp="2026-01-06 09:26:31 +0000 UTC" firstStartedPulling="2026-01-06 09:26:33.197802862 +0000 UTC m=+4295.243975729" lastFinishedPulling="2026-01-06 09:26:35.64429858 +0000 UTC m=+4297.690471467" observedRunningTime="2026-01-06 09:26:36.253978111 +0000 UTC m=+4298.300150948" watchObservedRunningTime="2026-01-06 09:26:36.258100809 +0000 UTC m=+4298.304273646" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.075245 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.078255 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.089055 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.184347 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.184562 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.184678 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9cxc\" (UniqueName: \"kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.286107 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.286162 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9cxc\" (UniqueName: \"kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.286223 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.286567 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.286587 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.319145 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-h9cxc\" (UniqueName: \"kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc\") pod \"redhat-operators-shf6d\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.406052 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:40 crc kubenswrapper[4784]: I0106 09:26:40.854298 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.275334 4784 generic.go:334] "Generic (PLEG): container finished" podID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerID="603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a" exitCode=0 Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.275410 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerDied","Data":"603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a"} Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.275452 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerStarted","Data":"302024d4125cf9ff1f91daef22a774f07ac64985bb4d22e63d4d492b2b88956a"} Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.483271 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.483334 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:41 crc kubenswrapper[4784]: I0106 09:26:41.546674 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:42 crc kubenswrapper[4784]: I0106 09:26:42.287846 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerStarted","Data":"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d"} Jan 06 09:26:42 crc kubenswrapper[4784]: I0106 09:26:42.752378 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:43 crc kubenswrapper[4784]: I0106 09:26:43.300255 4784 generic.go:334] "Generic (PLEG): container finished" podID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerID="dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d" exitCode=0 Jan 06 09:26:43 crc kubenswrapper[4784]: I0106 09:26:43.300357 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerDied","Data":"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d"} Jan 06 09:26:43 crc kubenswrapper[4784]: I0106 09:26:43.846611 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:44 crc kubenswrapper[4784]: I0106 09:26:44.314196 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r6pnq" 
podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="registry-server" containerID="cri-o://3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416" gracePeriod=2 Jan 06 09:26:44 crc kubenswrapper[4784]: I0106 09:26:44.329076 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerStarted","Data":"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1"} Jan 06 09:26:44 crc kubenswrapper[4784]: I0106 09:26:44.351807 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:26:44 crc kubenswrapper[4784]: I0106 09:26:44.351955 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:26:44 crc kubenswrapper[4784]: I0106 09:26:44.354613 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-shf6d" podStartSLOduration=1.653257393 podStartE2EDuration="4.354587342s" podCreationTimestamp="2026-01-06 09:26:40 +0000 UTC" firstStartedPulling="2026-01-06 09:26:41.278276698 +0000 UTC m=+4303.324449575" lastFinishedPulling="2026-01-06 09:26:43.979606657 +0000 UTC m=+4306.025779524" observedRunningTime="2026-01-06 09:26:44.351860978 +0000 UTC m=+4306.398033855" watchObservedRunningTime="2026-01-06 09:26:44.354587342 +0000 UTC m=+4306.400760189" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.769282 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.804745 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npdq2\" (UniqueName: \"kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2\") pod \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.804918 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities\") pod \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.804980 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content\") pod \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\" (UID: \"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c\") " Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.805889 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities" (OuterVolumeSpecName: "utilities") pod "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" (UID: "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.806968 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.810854 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2" (OuterVolumeSpecName: "kube-api-access-npdq2") pod "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" (UID: "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c"). InnerVolumeSpecName "kube-api-access-npdq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.863639 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" (UID: "de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.908392 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:45 crc kubenswrapper[4784]: I0106 09:26:45.908426 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npdq2\" (UniqueName: \"kubernetes.io/projected/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c-kube-api-access-npdq2\") on node \"crc\" DevicePath \"\"" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.331699 4784 generic.go:334] "Generic (PLEG): container finished" podID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerID="3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416" exitCode=0 Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.331775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerDied","Data":"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416"} Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.331958 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r6pnq" event={"ID":"de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c","Type":"ContainerDied","Data":"548651162ee530d7ec51cf1ccc99c6288394a76ba3079e243a2e48e577797aaf"} Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.331832 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r6pnq" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.332005 4784 scope.go:117] "RemoveContainer" containerID="3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.374819 4784 scope.go:117] "RemoveContainer" containerID="900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.378135 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.386436 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r6pnq"] Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.404378 4784 scope.go:117] "RemoveContainer" containerID="8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.448935 4784 scope.go:117] "RemoveContainer" containerID="3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416" Jan 06 09:26:46 crc kubenswrapper[4784]: E0106 09:26:46.449990 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416\": container with ID starting with 3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416 not found: ID does not exist" containerID="3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.450041 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416"} err="failed to get container status \"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416\": rpc error: code = NotFound desc = could not find container \"3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416\": container with ID starting with 3359d816b05ae6b510e717ea20591ba7b8d5e0ef9d8bb2b176feec0f8bf67416 not found: ID does not exist" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.450075 4784 scope.go:117] "RemoveContainer" containerID="900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1" Jan 06 09:26:46 crc kubenswrapper[4784]: E0106 09:26:46.450666 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1\": container with ID starting with 900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1 not found: ID does not exist" containerID="900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.450736 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1"} err="failed to get container status \"900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1\": rpc error: code = NotFound desc = could not find container \"900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1\": container with ID starting with 900921bdbd17d743fb6c97083f105559720c319bfcf847040e0d1df2f05e30f1 not found: ID does not exist" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.450775 4784 scope.go:117] "RemoveContainer" 
containerID="8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115" Jan 06 09:26:46 crc kubenswrapper[4784]: E0106 09:26:46.451415 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115\": container with ID starting with 8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115 not found: ID does not exist" containerID="8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115" Jan 06 09:26:46 crc kubenswrapper[4784]: I0106 09:26:46.451454 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115"} err="failed to get container status \"8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115\": rpc error: code = NotFound desc = could not find container \"8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115\": container with ID starting with 8073a07117a2904b82a7d39f8d23afa36a20d7f27228c08f3c25dd9820352115 not found: ID does not exist" Jan 06 09:26:48 crc kubenswrapper[4784]: I0106 09:26:48.326018 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" path="/var/lib/kubelet/pods/de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c/volumes" Jan 06 09:26:50 crc kubenswrapper[4784]: I0106 09:26:50.406293 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:50 crc kubenswrapper[4784]: I0106 09:26:50.406353 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:26:51 crc kubenswrapper[4784]: I0106 09:26:51.472183 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-shf6d" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="registry-server" probeResult="failure" output=< Jan 06 09:26:51 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s Jan 06 09:26:51 crc kubenswrapper[4784]: > Jan 06 09:27:00 crc kubenswrapper[4784]: I0106 09:27:00.487760 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:27:00 crc kubenswrapper[4784]: I0106 09:27:00.550077 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:27:00 crc kubenswrapper[4784]: I0106 09:27:00.741108 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.479301 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-shf6d" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="registry-server" containerID="cri-o://16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1" gracePeriod=2 Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.856590 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.985750 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities\") pod \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.985838 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9cxc\" (UniqueName: \"kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc\") pod \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.986716 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content\") pod \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\" (UID: \"b023ae0f-a6c1-4854-be4e-330eacd8b3bc\") " Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.986872 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities" (OuterVolumeSpecName: "utilities") pod "b023ae0f-a6c1-4854-be4e-330eacd8b3bc" (UID: "b023ae0f-a6c1-4854-be4e-330eacd8b3bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.987275 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:27:02 crc kubenswrapper[4784]: I0106 09:27:02.994232 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc" (OuterVolumeSpecName: "kube-api-access-h9cxc") pod "b023ae0f-a6c1-4854-be4e-330eacd8b3bc" (UID: "b023ae0f-a6c1-4854-be4e-330eacd8b3bc"). InnerVolumeSpecName "kube-api-access-h9cxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.088966 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9cxc\" (UniqueName: \"kubernetes.io/projected/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-kube-api-access-h9cxc\") on node \"crc\" DevicePath \"\"" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.164670 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b023ae0f-a6c1-4854-be4e-330eacd8b3bc" (UID: "b023ae0f-a6c1-4854-be4e-330eacd8b3bc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.189967 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b023ae0f-a6c1-4854-be4e-330eacd8b3bc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.496366 4784 generic.go:334] "Generic (PLEG): container finished" podID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerID="16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1" exitCode=0 Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.496411 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerDied","Data":"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1"} Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.496444 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-shf6d" event={"ID":"b023ae0f-a6c1-4854-be4e-330eacd8b3bc","Type":"ContainerDied","Data":"302024d4125cf9ff1f91daef22a774f07ac64985bb4d22e63d4d492b2b88956a"} Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.496503 4784 scope.go:117] "RemoveContainer" containerID="16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.496582 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-shf6d" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.536754 4784 scope.go:117] "RemoveContainer" containerID="dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.567245 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.576865 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-shf6d"] Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.595384 4784 scope.go:117] "RemoveContainer" containerID="603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.629382 4784 scope.go:117] "RemoveContainer" containerID="16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1" Jan 06 09:27:03 crc kubenswrapper[4784]: E0106 09:27:03.629958 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1\": container with ID starting with 16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1 not found: ID does not exist" containerID="16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.630013 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1"} err="failed to get container status \"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1\": rpc error: code = NotFound desc = could not find container \"16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1\": container with ID starting with 16a12341feacb4efede175d7576b433552267fbbd07afa5166a93404f01e70e1 not found: ID does not exist" Jan 06 09:27:03 crc 
kubenswrapper[4784]: I0106 09:27:03.630046 4784 scope.go:117] "RemoveContainer" containerID="dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d" Jan 06 09:27:03 crc kubenswrapper[4784]: E0106 09:27:03.630452 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d\": container with ID starting with dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d not found: ID does not exist" containerID="dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.630493 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d"} err="failed to get container status \"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d\": rpc error: code = NotFound desc = could not find container \"dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d\": container with ID starting with dda86100f38df0a7120a32c1002b081bb31bda56afa9f8bd22e61606bf25ea5d not found: ID does not exist" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.630519 4784 scope.go:117] "RemoveContainer" containerID="603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a" Jan 06 09:27:03 crc kubenswrapper[4784]: E0106 09:27:03.630862 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a\": container with ID starting with 603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a not found: ID does not exist" containerID="603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a" Jan 06 09:27:03 crc kubenswrapper[4784]: I0106 09:27:03.630904 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a"} err="failed to get container status \"603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a\": rpc error: code = NotFound desc = could not find container \"603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a\": container with ID starting with 603acf8d12e5e4369850b95ddda29e984cabfa1e22c274de037bab4aa7bacf5a not found: ID does not exist" Jan 06 09:27:04 crc kubenswrapper[4784]: I0106 09:27:04.327259 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" path="/var/lib/kubelet/pods/b023ae0f-a6c1-4854-be4e-330eacd8b3bc/volumes" Jan 06 09:27:14 crc kubenswrapper[4784]: I0106 09:27:14.351137 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:27:14 crc kubenswrapper[4784]: I0106 09:27:14.351766 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.350514 4784 patch_prober.go:28] interesting 
pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.351251 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.351306 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.352127 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.352205 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" gracePeriod=600 Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.906239 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" exitCode=0 Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.906485 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322"} Jan 06 09:27:44 crc kubenswrapper[4784]: I0106 09:27:44.906742 4784 scope.go:117] "RemoveContainer" containerID="3d86236ad369d8703dc9c8fcda8ff98b073f6ebb3d44b9e7f892b5724b95ec07" Jan 06 09:27:45 crc kubenswrapper[4784]: E0106 09:27:45.351449 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:27:45 crc kubenswrapper[4784]: I0106 09:27:45.919202 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:27:45 crc kubenswrapper[4784]: E0106 09:27:45.919631 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:27:58 crc kubenswrapper[4784]: I0106 09:27:58.322807 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:27:58 crc kubenswrapper[4784]: E0106 09:27:58.326285 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:28:11 crc kubenswrapper[4784]: I0106 09:28:11.312774 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:28:11 crc kubenswrapper[4784]: E0106 09:28:11.313933 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:28:24 crc kubenswrapper[4784]: I0106 09:28:24.312710 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:28:24 crc kubenswrapper[4784]: E0106 09:28:24.313701 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:28:39 crc kubenswrapper[4784]: I0106 09:28:39.311932 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:28:39 crc kubenswrapper[4784]: E0106 09:28:39.312925 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:28:50 crc kubenswrapper[4784]: I0106 09:28:50.312264 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:28:50 crc kubenswrapper[4784]: E0106 09:28:50.313295 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:29:04 crc kubenswrapper[4784]: I0106 09:29:04.312526 4784 
scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:29:04 crc kubenswrapper[4784]: E0106 09:29:04.313838 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:29:16 crc kubenswrapper[4784]: I0106 09:29:16.312906 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:29:16 crc kubenswrapper[4784]: E0106 09:29:16.313559 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:29:17 crc kubenswrapper[4784]: I0106 09:29:17.126733 4784 scope.go:117] "RemoveContainer" containerID="ace6a42e829aa948c4f582f1d2c5fe9455cb3ab79693cb7ea67f2ced39ac652e" Jan 06 09:29:17 crc kubenswrapper[4784]: I0106 09:29:17.168599 4784 scope.go:117] "RemoveContainer" containerID="75824b62a168a0a9a0557146abbb032530835fb182078676bb9876e68afcf5f6" Jan 06 09:29:17 crc kubenswrapper[4784]: I0106 09:29:17.202069 4784 scope.go:117] "RemoveContainer" containerID="b2e785c003dccb2dd8f73a57c2eadbfc16d958012d611685e31ab2468b4b078b" Jan 06 09:29:29 crc kubenswrapper[4784]: I0106 09:29:29.312285 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:29:29 crc kubenswrapper[4784]: E0106 09:29:29.313057 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:29:43 crc kubenswrapper[4784]: I0106 09:29:43.314405 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:29:43 crc kubenswrapper[4784]: E0106 09:29:43.315367 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:29:57 crc kubenswrapper[4784]: I0106 09:29:57.312291 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:29:57 crc kubenswrapper[4784]: E0106 09:29:57.313534 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.196332 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6"] Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197483 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="extract-utilities" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197506 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="extract-utilities" Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197533 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="extract-content" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197568 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="extract-content" Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197594 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="extract-content" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197607 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="extract-content" Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197627 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197639 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197663 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197674 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: E0106 09:30:00.197703 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="extract-utilities" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197715 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="extract-utilities" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197928 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="de65e904-fbf9-4f5e-ab1e-998fc2cbdf3c" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.197954 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b023ae0f-a6c1-4854-be4e-330eacd8b3bc" containerName="registry-server" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.198713 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.202142 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.202715 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.212622 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6"] Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.234110 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.234236 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.234285 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzjj5\" (UniqueName: \"kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.335398 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzjj5\" (UniqueName: \"kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.335501 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.335641 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.337431 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume\") pod 
\"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.345111 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.369211 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzjj5\" (UniqueName: \"kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5\") pod \"collect-profiles-29461530-t8rm6\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:00 crc kubenswrapper[4784]: I0106 09:30:00.528948 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:01 crc kubenswrapper[4784]: I0106 09:30:01.002502 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6"] Jan 06 09:30:01 crc kubenswrapper[4784]: I0106 09:30:01.142112 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" event={"ID":"4464f754-3667-49f6-a8b1-f9ca80dca46f","Type":"ContainerStarted","Data":"7acac7eaad861b4262bc3f9b0ed4fe14841321b90d245b530870d347506249c3"} Jan 06 09:30:02 crc kubenswrapper[4784]: I0106 09:30:02.154630 4784 generic.go:334] "Generic (PLEG): container finished" podID="4464f754-3667-49f6-a8b1-f9ca80dca46f" containerID="778c08fb39e99ff7214290fd2728b03860ffa349755ef7e1cc0eedb2e75b1c31" exitCode=0 Jan 06 09:30:02 crc kubenswrapper[4784]: I0106 09:30:02.154884 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" event={"ID":"4464f754-3667-49f6-a8b1-f9ca80dca46f","Type":"ContainerDied","Data":"778c08fb39e99ff7214290fd2728b03860ffa349755ef7e1cc0eedb2e75b1c31"} Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.514997 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.596978 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume\") pod \"4464f754-3667-49f6-a8b1-f9ca80dca46f\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.597086 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzjj5\" (UniqueName: \"kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5\") pod \"4464f754-3667-49f6-a8b1-f9ca80dca46f\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.597168 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume\") pod \"4464f754-3667-49f6-a8b1-f9ca80dca46f\" (UID: \"4464f754-3667-49f6-a8b1-f9ca80dca46f\") " Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.600183 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume" (OuterVolumeSpecName: "config-volume") pod "4464f754-3667-49f6-a8b1-f9ca80dca46f" (UID: "4464f754-3667-49f6-a8b1-f9ca80dca46f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.604106 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4464f754-3667-49f6-a8b1-f9ca80dca46f" (UID: "4464f754-3667-49f6-a8b1-f9ca80dca46f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.606750 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5" (OuterVolumeSpecName: "kube-api-access-mzjj5") pod "4464f754-3667-49f6-a8b1-f9ca80dca46f" (UID: "4464f754-3667-49f6-a8b1-f9ca80dca46f"). InnerVolumeSpecName "kube-api-access-mzjj5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.699296 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4464f754-3667-49f6-a8b1-f9ca80dca46f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.700051 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzjj5\" (UniqueName: \"kubernetes.io/projected/4464f754-3667-49f6-a8b1-f9ca80dca46f-kube-api-access-mzjj5\") on node \"crc\" DevicePath \"\"" Jan 06 09:30:03 crc kubenswrapper[4784]: I0106 09:30:03.700097 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4464f754-3667-49f6-a8b1-f9ca80dca46f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:30:04 crc kubenswrapper[4784]: I0106 09:30:04.180764 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" event={"ID":"4464f754-3667-49f6-a8b1-f9ca80dca46f","Type":"ContainerDied","Data":"7acac7eaad861b4262bc3f9b0ed4fe14841321b90d245b530870d347506249c3"} Jan 06 09:30:04 crc kubenswrapper[4784]: I0106 09:30:04.180875 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7acac7eaad861b4262bc3f9b0ed4fe14841321b90d245b530870d347506249c3" Jan 06 09:30:04 crc kubenswrapper[4784]: I0106 09:30:04.180890 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461530-t8rm6" Jan 06 09:30:04 crc kubenswrapper[4784]: I0106 09:30:04.599441 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg"] Jan 06 09:30:04 crc kubenswrapper[4784]: I0106 09:30:04.605508 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461485-kstgg"] Jan 06 09:30:06 crc kubenswrapper[4784]: I0106 09:30:06.325363 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72c8f74c-6820-4a79-809e-52284e112277" path="/var/lib/kubelet/pods/72c8f74c-6820-4a79-809e-52284e112277/volumes" Jan 06 09:30:09 crc kubenswrapper[4784]: I0106 09:30:09.312924 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:30:09 crc kubenswrapper[4784]: E0106 09:30:09.313806 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:30:17 crc kubenswrapper[4784]: I0106 09:30:17.262712 4784 scope.go:117] "RemoveContainer" containerID="203c20b89f8715ee94c8fc711866de9ff21874a51f7bb10faad1778949049337" Jan 06 09:30:24 crc kubenswrapper[4784]: I0106 09:30:24.312468 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:30:24 crc kubenswrapper[4784]: E0106 09:30:24.313309 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:30:38 crc kubenswrapper[4784]: I0106 09:30:38.324885 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:30:38 crc kubenswrapper[4784]: E0106 09:30:38.328444 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:30:53 crc kubenswrapper[4784]: I0106 09:30:53.312536 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:30:53 crc kubenswrapper[4784]: E0106 09:30:53.313744 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:31:05 crc kubenswrapper[4784]: I0106 09:31:05.312604 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:31:05 crc kubenswrapper[4784]: E0106 09:31:05.313598 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:31:17 crc kubenswrapper[4784]: I0106 09:31:17.312686 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:31:17 crc kubenswrapper[4784]: E0106 09:31:17.313705 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:31:28 crc kubenswrapper[4784]: I0106 09:31:28.324303 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:31:28 crc kubenswrapper[4784]: E0106 09:31:28.325317 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:31:43 crc kubenswrapper[4784]: I0106 09:31:43.313806 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:31:43 crc kubenswrapper[4784]: E0106 09:31:43.317715 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:31:56 crc kubenswrapper[4784]: I0106 09:31:56.312693 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:31:56 crc kubenswrapper[4784]: E0106 09:31:56.313806 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:32:08 crc kubenswrapper[4784]: I0106 09:32:08.320303 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:32:08 crc kubenswrapper[4784]: E0106 09:32:08.321605 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:32:19 crc kubenswrapper[4784]: I0106 09:32:19.313421 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:32:19 crc kubenswrapper[4784]: E0106 09:32:19.314299 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:32:31 crc kubenswrapper[4784]: I0106 09:32:31.312420 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:32:31 crc kubenswrapper[4784]: E0106 09:32:31.313408 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:32:46 crc kubenswrapper[4784]: I0106 09:32:46.312352 4784 
scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:32:46 crc kubenswrapper[4784]: I0106 09:32:46.696082 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a"} Jan 06 09:35:14 crc kubenswrapper[4784]: I0106 09:35:14.452233 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:35:14 crc kubenswrapper[4784]: I0106 09:35:14.453753 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:35:35 crc kubenswrapper[4784]: I0106 09:35:35.964499 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-t46db"] Jan 06 09:35:35 crc kubenswrapper[4784]: I0106 09:35:35.969732 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-t46db"] Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.143169 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-mg2wc"] Jan 06 09:35:36 crc kubenswrapper[4784]: E0106 09:35:36.143674 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4464f754-3667-49f6-a8b1-f9ca80dca46f" containerName="collect-profiles" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.143704 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4464f754-3667-49f6-a8b1-f9ca80dca46f" containerName="collect-profiles" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.144079 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4464f754-3667-49f6-a8b1-f9ca80dca46f" containerName="collect-profiles" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.144892 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.147770 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.147789 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.147955 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.148036 4784 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-l6flr" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.168361 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mg2wc"] Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.191292 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.191367 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5slcj\" (UniqueName: \"kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.191426 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.292644 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.292732 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5slcj\" (UniqueName: \"kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.292809 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.293013 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " 
pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.294505 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.323623 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5slcj\" (UniqueName: \"kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj\") pod \"crc-storage-crc-mg2wc\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.324104 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4853126c-df1d-4bf9-bbe3-b0c2d47eec21" path="/var/lib/kubelet/pods/4853126c-df1d-4bf9-bbe3-b0c2d47eec21/volumes" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.474361 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.746748 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mg2wc"] Jan 06 09:35:36 crc kubenswrapper[4784]: I0106 09:35:36.753843 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 09:35:37 crc kubenswrapper[4784]: I0106 09:35:37.668401 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mg2wc" event={"ID":"3785def2-7561-4dd3-98eb-f9f77139daea","Type":"ContainerStarted","Data":"82138bd039035802a484241893b9aeb3b33305b489e5fbbc6d063fe396bb98e1"} Jan 06 09:35:37 crc kubenswrapper[4784]: I0106 09:35:37.669304 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mg2wc" event={"ID":"3785def2-7561-4dd3-98eb-f9f77139daea","Type":"ContainerStarted","Data":"e5402aea3d9465fd4fd23d1c5e6cdc834c5f5a7f571ebad659c0e2c1db9e45b0"} Jan 06 09:35:37 crc kubenswrapper[4784]: I0106 09:35:37.694948 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="crc-storage/crc-storage-crc-mg2wc" podStartSLOduration=1.086980949 podStartE2EDuration="1.694917484s" podCreationTimestamp="2026-01-06 09:35:36 +0000 UTC" firstStartedPulling="2026-01-06 09:35:36.753656081 +0000 UTC m=+4838.799828918" lastFinishedPulling="2026-01-06 09:35:37.361592576 +0000 UTC m=+4839.407765453" observedRunningTime="2026-01-06 09:35:37.692718796 +0000 UTC m=+4839.738891714" watchObservedRunningTime="2026-01-06 09:35:37.694917484 +0000 UTC m=+4839.741090371" Jan 06 09:35:38 crc kubenswrapper[4784]: I0106 09:35:38.680290 4784 generic.go:334] "Generic (PLEG): container finished" podID="3785def2-7561-4dd3-98eb-f9f77139daea" containerID="82138bd039035802a484241893b9aeb3b33305b489e5fbbc6d063fe396bb98e1" exitCode=0 Jan 06 09:35:38 crc kubenswrapper[4784]: I0106 09:35:38.680380 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mg2wc" event={"ID":"3785def2-7561-4dd3-98eb-f9f77139daea","Type":"ContainerDied","Data":"82138bd039035802a484241893b9aeb3b33305b489e5fbbc6d063fe396bb98e1"} Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.042891 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.054687 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage\") pod \"3785def2-7561-4dd3-98eb-f9f77139daea\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.054806 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt\") pod \"3785def2-7561-4dd3-98eb-f9f77139daea\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.054881 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "3785def2-7561-4dd3-98eb-f9f77139daea" (UID: "3785def2-7561-4dd3-98eb-f9f77139daea"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.054955 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5slcj\" (UniqueName: \"kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj\") pod \"3785def2-7561-4dd3-98eb-f9f77139daea\" (UID: \"3785def2-7561-4dd3-98eb-f9f77139daea\") " Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.055402 4784 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3785def2-7561-4dd3-98eb-f9f77139daea-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.063964 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj" (OuterVolumeSpecName: "kube-api-access-5slcj") pod "3785def2-7561-4dd3-98eb-f9f77139daea" (UID: "3785def2-7561-4dd3-98eb-f9f77139daea"). InnerVolumeSpecName "kube-api-access-5slcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.079701 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "3785def2-7561-4dd3-98eb-f9f77139daea" (UID: "3785def2-7561-4dd3-98eb-f9f77139daea"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.156856 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5slcj\" (UniqueName: \"kubernetes.io/projected/3785def2-7561-4dd3-98eb-f9f77139daea-kube-api-access-5slcj\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.156905 4784 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3785def2-7561-4dd3-98eb-f9f77139daea-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.700577 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mg2wc" event={"ID":"3785def2-7561-4dd3-98eb-f9f77139daea","Type":"ContainerDied","Data":"e5402aea3d9465fd4fd23d1c5e6cdc834c5f5a7f571ebad659c0e2c1db9e45b0"} Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.700643 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mg2wc" Jan 06 09:35:40 crc kubenswrapper[4784]: I0106 09:35:40.700634 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5402aea3d9465fd4fd23d1c5e6cdc834c5f5a7f571ebad659c0e2c1db9e45b0" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.297492 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-mg2wc"] Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.307676 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-mg2wc"] Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.332197 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3785def2-7561-4dd3-98eb-f9f77139daea" path="/var/lib/kubelet/pods/3785def2-7561-4dd3-98eb-f9f77139daea/volumes" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.518762 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-9vlzh"] Jan 06 09:35:42 crc kubenswrapper[4784]: E0106 09:35:42.519253 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3785def2-7561-4dd3-98eb-f9f77139daea" containerName="storage" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.519287 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3785def2-7561-4dd3-98eb-f9f77139daea" containerName="storage" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.519582 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3785def2-7561-4dd3-98eb-f9f77139daea" containerName="storage" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.520378 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.523504 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.523903 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.524514 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.526260 4784 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-l6flr" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.532713 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-9vlzh"] Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.594239 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxzqz\" (UniqueName: \"kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.594499 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.594596 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.695938 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.696024 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.696172 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxzqz\" (UniqueName: \"kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.696782 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " 
pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.697330 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.736227 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxzqz\" (UniqueName: \"kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz\") pod \"crc-storage-crc-9vlzh\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:42 crc kubenswrapper[4784]: I0106 09:35:42.850981 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:43 crc kubenswrapper[4784]: I0106 09:35:43.389003 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-9vlzh"] Jan 06 09:35:43 crc kubenswrapper[4784]: I0106 09:35:43.732170 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9vlzh" event={"ID":"f2fc8b57-4a88-433c-a210-7f0c17ca0f01","Type":"ContainerStarted","Data":"c8bb67e74ab030a70993e7905761de322a9cee4fb39eff9c7a21fa92ff62b4b0"} Jan 06 09:35:44 crc kubenswrapper[4784]: I0106 09:35:44.350980 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:35:44 crc kubenswrapper[4784]: I0106 09:35:44.351303 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:35:44 crc kubenswrapper[4784]: I0106 09:35:44.744775 4784 generic.go:334] "Generic (PLEG): container finished" podID="f2fc8b57-4a88-433c-a210-7f0c17ca0f01" containerID="a8c5fde198a28473840bfa24f8bc606ee39c7f8462ffa47286a00d0850a221fe" exitCode=0 Jan 06 09:35:44 crc kubenswrapper[4784]: I0106 09:35:44.744835 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9vlzh" event={"ID":"f2fc8b57-4a88-433c-a210-7f0c17ca0f01","Type":"ContainerDied","Data":"a8c5fde198a28473840bfa24f8bc606ee39c7f8462ffa47286a00d0850a221fe"} Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.143252 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.256657 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage\") pod \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.256769 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt\") pod \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.256911 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "f2fc8b57-4a88-433c-a210-7f0c17ca0f01" (UID: "f2fc8b57-4a88-433c-a210-7f0c17ca0f01"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.256931 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxzqz\" (UniqueName: \"kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz\") pod \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\" (UID: \"f2fc8b57-4a88-433c-a210-7f0c17ca0f01\") " Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.257334 4784 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.263866 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz" (OuterVolumeSpecName: "kube-api-access-pxzqz") pod "f2fc8b57-4a88-433c-a210-7f0c17ca0f01" (UID: "f2fc8b57-4a88-433c-a210-7f0c17ca0f01"). InnerVolumeSpecName "kube-api-access-pxzqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.295871 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "f2fc8b57-4a88-433c-a210-7f0c17ca0f01" (UID: "f2fc8b57-4a88-433c-a210-7f0c17ca0f01"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.359387 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxzqz\" (UniqueName: \"kubernetes.io/projected/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-kube-api-access-pxzqz\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.359618 4784 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/f2fc8b57-4a88-433c-a210-7f0c17ca0f01-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.763621 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9vlzh" event={"ID":"f2fc8b57-4a88-433c-a210-7f0c17ca0f01","Type":"ContainerDied","Data":"c8bb67e74ab030a70993e7905761de322a9cee4fb39eff9c7a21fa92ff62b4b0"} Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.763678 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8bb67e74ab030a70993e7905761de322a9cee4fb39eff9c7a21fa92ff62b4b0" Jan 06 09:35:46 crc kubenswrapper[4784]: I0106 09:35:46.763689 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-9vlzh" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.041141 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:35:56 crc kubenswrapper[4784]: E0106 09:35:56.042067 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2fc8b57-4a88-433c-a210-7f0c17ca0f01" containerName="storage" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.042090 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2fc8b57-4a88-433c-a210-7f0c17ca0f01" containerName="storage" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.042353 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2fc8b57-4a88-433c-a210-7f0c17ca0f01" containerName="storage" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.052982 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.095107 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.234777 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkwvf\" (UniqueName: \"kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.234854 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.234890 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.336888 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkwvf\" (UniqueName: \"kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.336975 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.337021 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.338059 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.338116 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.374023 4784 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fkwvf\" (UniqueName: \"kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf\") pod \"redhat-marketplace-zrmlx\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.404382 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.662507 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.861048 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerStarted","Data":"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452"} Jan 06 09:35:56 crc kubenswrapper[4784]: I0106 09:35:56.861089 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerStarted","Data":"7df49e3c49bc36448e57a5d16bd9092e7a4bbc7f3ff10c0fca863c34467c4d43"} Jan 06 09:35:57 crc kubenswrapper[4784]: I0106 09:35:57.873953 4784 generic.go:334] "Generic (PLEG): container finished" podID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerID="505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452" exitCode=0 Jan 06 09:35:57 crc kubenswrapper[4784]: I0106 09:35:57.874078 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerDied","Data":"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452"} Jan 06 09:35:58 crc kubenswrapper[4784]: I0106 09:35:58.886591 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerStarted","Data":"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d"} Jan 06 09:35:59 crc kubenswrapper[4784]: I0106 09:35:59.899651 4784 generic.go:334] "Generic (PLEG): container finished" podID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerID="93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d" exitCode=0 Jan 06 09:35:59 crc kubenswrapper[4784]: I0106 09:35:59.899744 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerDied","Data":"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d"} Jan 06 09:36:00 crc kubenswrapper[4784]: I0106 09:36:00.918474 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerStarted","Data":"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd"} Jan 06 09:36:00 crc kubenswrapper[4784]: I0106 09:36:00.945915 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zrmlx" podStartSLOduration=2.477712027 podStartE2EDuration="4.945896078s" podCreationTimestamp="2026-01-06 09:35:56 +0000 UTC" firstStartedPulling="2026-01-06 09:35:57.876873511 +0000 UTC m=+4859.923046388" lastFinishedPulling="2026-01-06 09:36:00.345057562 +0000 UTC 
m=+4862.391230439" observedRunningTime="2026-01-06 09:36:00.939150638 +0000 UTC m=+4862.985323485" watchObservedRunningTime="2026-01-06 09:36:00.945896078 +0000 UTC m=+4862.992068925" Jan 06 09:36:06 crc kubenswrapper[4784]: I0106 09:36:06.404771 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:06 crc kubenswrapper[4784]: I0106 09:36:06.405178 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:06 crc kubenswrapper[4784]: I0106 09:36:06.483642 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:07 crc kubenswrapper[4784]: I0106 09:36:07.051728 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:07 crc kubenswrapper[4784]: I0106 09:36:07.111662 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:36:08 crc kubenswrapper[4784]: I0106 09:36:08.990316 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zrmlx" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="registry-server" containerID="cri-o://5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd" gracePeriod=2 Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.637497 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.775670 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities\") pod \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.775715 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkwvf\" (UniqueName: \"kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf\") pod \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.775737 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content\") pod \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\" (UID: \"9cdf11c4-2097-4d09-b48f-da7a0f8e480c\") " Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.776944 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities" (OuterVolumeSpecName: "utilities") pod "9cdf11c4-2097-4d09-b48f-da7a0f8e480c" (UID: "9cdf11c4-2097-4d09-b48f-da7a0f8e480c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.788749 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf" (OuterVolumeSpecName: "kube-api-access-fkwvf") pod "9cdf11c4-2097-4d09-b48f-da7a0f8e480c" (UID: "9cdf11c4-2097-4d09-b48f-da7a0f8e480c"). 
InnerVolumeSpecName "kube-api-access-fkwvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.804618 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cdf11c4-2097-4d09-b48f-da7a0f8e480c" (UID: "9cdf11c4-2097-4d09-b48f-da7a0f8e480c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.877052 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.877090 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkwvf\" (UniqueName: \"kubernetes.io/projected/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-kube-api-access-fkwvf\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:09 crc kubenswrapper[4784]: I0106 09:36:09.877104 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf11c4-2097-4d09-b48f-da7a0f8e480c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.001271 4784 generic.go:334] "Generic (PLEG): container finished" podID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerID="5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd" exitCode=0 Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.001338 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrmlx" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.001382 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerDied","Data":"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd"} Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.001965 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrmlx" event={"ID":"9cdf11c4-2097-4d09-b48f-da7a0f8e480c","Type":"ContainerDied","Data":"7df49e3c49bc36448e57a5d16bd9092e7a4bbc7f3ff10c0fca863c34467c4d43"} Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.002009 4784 scope.go:117] "RemoveContainer" containerID="5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.035043 4784 scope.go:117] "RemoveContainer" containerID="93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.057210 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.071701 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrmlx"] Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.085083 4784 scope.go:117] "RemoveContainer" containerID="505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.110455 4784 scope.go:117] "RemoveContainer" containerID="5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd" Jan 06 09:36:10 crc kubenswrapper[4784]: 
E0106 09:36:10.111167 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd\": container with ID starting with 5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd not found: ID does not exist" containerID="5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.111220 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd"} err="failed to get container status \"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd\": rpc error: code = NotFound desc = could not find container \"5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd\": container with ID starting with 5c48106c6b2d3db1b4deebcb737dd65c7b2c9d5420f8c0bb4d821f695c052bfd not found: ID does not exist" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.111256 4784 scope.go:117] "RemoveContainer" containerID="93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d" Jan 06 09:36:10 crc kubenswrapper[4784]: E0106 09:36:10.111907 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d\": container with ID starting with 93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d not found: ID does not exist" containerID="93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.112228 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d"} err="failed to get container status \"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d\": rpc error: code = NotFound desc = could not find container \"93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d\": container with ID starting with 93e82633bd57e2ca51410a5644307696274ff2e959fda82abddc42645afa4f8d not found: ID does not exist" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.112437 4784 scope.go:117] "RemoveContainer" containerID="505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452" Jan 06 09:36:10 crc kubenswrapper[4784]: E0106 09:36:10.113146 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452\": container with ID starting with 505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452 not found: ID does not exist" containerID="505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.113188 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452"} err="failed to get container status \"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452\": rpc error: code = NotFound desc = could not find container \"505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452\": container with ID starting with 505ca743bdba0e31d0432cba2f4729657610e82ba0585154faf92aacd50b1452 not found: ID does not exist" Jan 06 09:36:10 crc kubenswrapper[4784]: I0106 09:36:10.344436 
4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" path="/var/lib/kubelet/pods/9cdf11c4-2097-4d09-b48f-da7a0f8e480c/volumes" Jan 06 09:36:14 crc kubenswrapper[4784]: I0106 09:36:14.351111 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:36:14 crc kubenswrapper[4784]: I0106 09:36:14.351634 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:36:14 crc kubenswrapper[4784]: I0106 09:36:14.351719 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 09:36:14 crc kubenswrapper[4784]: I0106 09:36:14.352742 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 09:36:14 crc kubenswrapper[4784]: I0106 09:36:14.352861 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a" gracePeriod=600 Jan 06 09:36:15 crc kubenswrapper[4784]: I0106 09:36:15.082724 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a" exitCode=0 Jan 06 09:36:15 crc kubenswrapper[4784]: I0106 09:36:15.083023 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a"} Jan 06 09:36:15 crc kubenswrapper[4784]: I0106 09:36:15.083142 4784 scope.go:117] "RemoveContainer" containerID="19e44a00b314657197593a36f86b5572697332b9b4f9a74b907da020debd1322" Jan 06 09:36:16 crc kubenswrapper[4784]: I0106 09:36:16.097703 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"} Jan 06 09:36:17 crc kubenswrapper[4784]: I0106 09:36:17.433684 4784 scope.go:117] "RemoveContainer" containerID="b2e5358c1ff5f41895d6449d3f32633bdf3c588e3a3bbfe6b5035d485ef6b41b" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.511152 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:36 crc kubenswrapper[4784]: E0106 09:36:36.512085 4784 cpu_manager.go:410] "RemoveStaleState: removing container" 
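
The liveness failure above is an ordinary HTTP probe miss: the GET against http://127.0.0.1:8798/health is refused while machine-config-daemon is not listening, the kubelet marks the probe unhealthy, logs that the container will be restarted, and kills it with a 600s grace period. A rough Go sketch of such an HTTP health check (endpoint and timeout here are illustrative assumptions, not the kubelet's prober):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeHTTP treats any transport error (e.g. "connection refused") or a
// non-2xx status as a probe failure, similar in spirit to an HTTP
// liveness probe.
func probeHTTP(url string, timeout time.Duration) error {
	client := http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeHTTP("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
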
podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="extract-utilities" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.512101 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="extract-utilities" Jan 06 09:36:36 crc kubenswrapper[4784]: E0106 09:36:36.512135 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="extract-content" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.512145 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="extract-content" Jan 06 09:36:36 crc kubenswrapper[4784]: E0106 09:36:36.512157 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="registry-server" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.512167 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="registry-server" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.512346 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cdf11c4-2097-4d09-b48f-da7a0f8e480c" containerName="registry-server" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.513594 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.536607 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.690410 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.690512 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd6td\" (UniqueName: \"kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.690564 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.791486 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd6td\" (UniqueName: \"kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.791532 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities\") pod 
\"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.791623 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.792093 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.792218 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.811537 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd6td\" (UniqueName: \"kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td\") pod \"community-operators-5m6lt\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:36 crc kubenswrapper[4784]: I0106 09:36:36.833695 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:37 crc kubenswrapper[4784]: I0106 09:36:37.358961 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:38 crc kubenswrapper[4784]: I0106 09:36:38.305919 4784 generic.go:334] "Generic (PLEG): container finished" podID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerID="01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea" exitCode=0 Jan 06 09:36:38 crc kubenswrapper[4784]: I0106 09:36:38.305981 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerDied","Data":"01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea"} Jan 06 09:36:38 crc kubenswrapper[4784]: I0106 09:36:38.306267 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerStarted","Data":"39b8e327aed95e6fe05857a42c69d888fdb8a81328b588ea0bf3f15dbc58e384"} Jan 06 09:36:39 crc kubenswrapper[4784]: I0106 09:36:39.319318 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerStarted","Data":"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86"} Jan 06 09:36:40 crc kubenswrapper[4784]: I0106 09:36:40.325192 4784 generic.go:334] "Generic (PLEG): container finished" podID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerID="22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86" exitCode=0 Jan 06 09:36:40 crc kubenswrapper[4784]: I0106 09:36:40.325230 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerDied","Data":"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86"} Jan 06 09:36:41 crc kubenswrapper[4784]: I0106 09:36:41.336050 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerStarted","Data":"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c"} Jan 06 09:36:41 crc kubenswrapper[4784]: I0106 09:36:41.357729 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5m6lt" podStartSLOduration=2.918556855 podStartE2EDuration="5.357692275s" podCreationTimestamp="2026-01-06 09:36:36 +0000 UTC" firstStartedPulling="2026-01-06 09:36:38.30967199 +0000 UTC m=+4900.355844867" lastFinishedPulling="2026-01-06 09:36:40.74880741 +0000 UTC m=+4902.794980287" observedRunningTime="2026-01-06 09:36:41.350658676 +0000 UTC m=+4903.396831513" watchObservedRunningTime="2026-01-06 09:36:41.357692275 +0000 UTC m=+4903.403865152" Jan 06 09:36:46 crc kubenswrapper[4784]: I0106 09:36:46.834692 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:46 crc kubenswrapper[4784]: I0106 09:36:46.835797 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:46 crc kubenswrapper[4784]: I0106 09:36:46.882288 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
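
The community-operators-5m6lt events above show the usual catalog-pod pipeline: the extract-utilities and extract-content containers each run to completion with exitCode=0 before registry-server starts. A toy Go sketch of that gate-on-exit-code ordering (container names taken from the log; the sequencing logic is illustrative, not the kubelet's):

package main

import "fmt"

// runInit simulates an init container that must exit 0 before the
// next step may start.
func runInit(name string) int {
	fmt.Printf("ContainerStarted %s\n", name)
	fmt.Printf("ContainerDied %s exitCode=0\n", name)
	return 0
}

func main() {
	for _, c := range []string{"extract-utilities", "extract-content"} {
		if code := runInit(c); code != 0 {
			fmt.Printf("init %s failed with %d; pod start blocked\n", c, code)
			return
		}
	}
	fmt.Println("ContainerStarted registry-server")
}
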
pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:47 crc kubenswrapper[4784]: I0106 09:36:47.465725 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:47 crc kubenswrapper[4784]: I0106 09:36:47.538869 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:49 crc kubenswrapper[4784]: I0106 09:36:49.399057 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5m6lt" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="registry-server" containerID="cri-o://69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c" gracePeriod=2 Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.388249 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.410314 4784 generic.go:334] "Generic (PLEG): container finished" podID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerID="69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c" exitCode=0 Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.410368 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerDied","Data":"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c"} Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.410417 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5m6lt" event={"ID":"3defe8c3-b07d-403b-9dd3-be8b88717fd0","Type":"ContainerDied","Data":"39b8e327aed95e6fe05857a42c69d888fdb8a81328b588ea0bf3f15dbc58e384"} Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.410441 4784 scope.go:117] "RemoveContainer" containerID="69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.410513 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5m6lt" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.430354 4784 scope.go:117] "RemoveContainer" containerID="22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.457777 4784 scope.go:117] "RemoveContainer" containerID="01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.494592 4784 scope.go:117] "RemoveContainer" containerID="69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c" Jan 06 09:36:50 crc kubenswrapper[4784]: E0106 09:36:50.495111 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c\": container with ID starting with 69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c not found: ID does not exist" containerID="69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.495190 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c"} err="failed to get container status \"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c\": rpc error: code = NotFound desc = could not find container \"69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c\": container with ID starting with 69971cd05ddc779410ea7ecd4a4a2c67116468f4edeb2ba295737d084b02b23c not found: ID does not exist" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.495223 4784 scope.go:117] "RemoveContainer" containerID="22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86" Jan 06 09:36:50 crc kubenswrapper[4784]: E0106 09:36:50.495989 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86\": container with ID starting with 22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86 not found: ID does not exist" containerID="22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.496036 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86"} err="failed to get container status \"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86\": rpc error: code = NotFound desc = could not find container \"22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86\": container with ID starting with 22fe1fe01e2b89c3702e6632b02d443af87ea8e9b67eae574ddb82fda43cae86 not found: ID does not exist" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.496063 4784 scope.go:117] "RemoveContainer" containerID="01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea" Jan 06 09:36:50 crc kubenswrapper[4784]: E0106 09:36:50.496526 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea\": container with ID starting with 01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea not found: ID does not exist" containerID="01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea" 
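
The "ContainerStatus from runtime service failed" / "DeleteContainer returned error" pairs above are a benign race: by the time the kubelet re-issues the removal, CRI-O has already deleted the container, so the runtime answers gRPC NotFound and the kubelet moves on. A hedged Go sketch of that idempotent-delete pattern (deleteContainer is a stand-in, not the CRI client; needs google.golang.org/grpc in go.mod):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// deleteContainer is a stand-in for a CRI RemoveContainer call against a
// container the runtime has already deleted.
func deleteContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// cleanup treats NotFound as success: the desired state (container gone)
// already holds, so the delete is idempotent.
func cleanup(id string) error {
	err := deleteContainer(id)
	if status.Code(err) == codes.NotFound {
		fmt.Printf("container %s already gone, nothing to do\n", id)
		return nil
	}
	return err
}

func main() {
	if err := cleanup("5c48106c6b2d"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}
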
Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.496666 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea"} err="failed to get container status \"01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea\": rpc error: code = NotFound desc = could not find container \"01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea\": container with ID starting with 01c18f40aea74405fbb953643db9ae4c4accd4b4421d7585591264bae23964ea not found: ID does not exist" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.515117 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jd6td\" (UniqueName: \"kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td\") pod \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.515898 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content\") pod \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.515973 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities\") pod \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\" (UID: \"3defe8c3-b07d-403b-9dd3-be8b88717fd0\") " Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.516777 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities" (OuterVolumeSpecName: "utilities") pod "3defe8c3-b07d-403b-9dd3-be8b88717fd0" (UID: "3defe8c3-b07d-403b-9dd3-be8b88717fd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.523257 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td" (OuterVolumeSpecName: "kube-api-access-jd6td") pod "3defe8c3-b07d-403b-9dd3-be8b88717fd0" (UID: "3defe8c3-b07d-403b-9dd3-be8b88717fd0"). InnerVolumeSpecName "kube-api-access-jd6td". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.566084 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3defe8c3-b07d-403b-9dd3-be8b88717fd0" (UID: "3defe8c3-b07d-403b-9dd3-be8b88717fd0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.618279 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.618323 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3defe8c3-b07d-403b-9dd3-be8b88717fd0-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.618334 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jd6td\" (UniqueName: \"kubernetes.io/projected/3defe8c3-b07d-403b-9dd3-be8b88717fd0-kube-api-access-jd6td\") on node \"crc\" DevicePath \"\"" Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.752962 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:50 crc kubenswrapper[4784]: I0106 09:36:50.759525 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5m6lt"] Jan 06 09:36:52 crc kubenswrapper[4784]: I0106 09:36:52.327215 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" path="/var/lib/kubelet/pods/3defe8c3-b07d-403b-9dd3-be8b88717fd0/volumes" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.522367 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"] Jan 06 09:36:54 crc kubenswrapper[4784]: E0106 09:36:54.522822 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="extract-utilities" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.522843 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="extract-utilities" Jan 06 09:36:54 crc kubenswrapper[4784]: E0106 09:36:54.522875 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="registry-server" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.522887 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="registry-server" Jan 06 09:36:54 crc kubenswrapper[4784]: E0106 09:36:54.522911 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="extract-content" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.522924 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="extract-content" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.523200 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3defe8c3-b07d-403b-9dd3-be8b88717fd0" containerName="registry-server" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.524850 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.547534 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"] Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.681996 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.682338 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkbl7\" (UniqueName: \"kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.682420 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.784079 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.784481 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkbl7\" (UniqueName: \"kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.784512 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.784979 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.785010 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.816653 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gkbl7\" (UniqueName: \"kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7\") pod \"redhat-operators-wdm9h\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:54 crc kubenswrapper[4784]: I0106 09:36:54.851498 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:36:55 crc kubenswrapper[4784]: I0106 09:36:55.308095 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"] Jan 06 09:36:55 crc kubenswrapper[4784]: W0106 09:36:55.325872 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c65632b_e212_4220_8621_18ec84fda50a.slice/crio-0008ba4160f758f61489c89a6393b8c63840fb417d6787f617db18279207e53f WatchSource:0}: Error finding container 0008ba4160f758f61489c89a6393b8c63840fb417d6787f617db18279207e53f: Status 404 returned error can't find the container with id 0008ba4160f758f61489c89a6393b8c63840fb417d6787f617db18279207e53f Jan 06 09:36:55 crc kubenswrapper[4784]: I0106 09:36:55.454419 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerStarted","Data":"0008ba4160f758f61489c89a6393b8c63840fb417d6787f617db18279207e53f"} Jan 06 09:36:56 crc kubenswrapper[4784]: I0106 09:36:56.466284 4784 generic.go:334] "Generic (PLEG): container finished" podID="9c65632b-e212-4220-8621-18ec84fda50a" containerID="59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f" exitCode=0 Jan 06 09:36:56 crc kubenswrapper[4784]: I0106 09:36:56.466354 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerDied","Data":"59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f"} Jan 06 09:36:58 crc kubenswrapper[4784]: I0106 09:36:58.486135 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerStarted","Data":"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd"} Jan 06 09:36:59 crc kubenswrapper[4784]: I0106 09:36:59.498150 4784 generic.go:334] "Generic (PLEG): container finished" podID="9c65632b-e212-4220-8621-18ec84fda50a" containerID="e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd" exitCode=0 Jan 06 09:36:59 crc kubenswrapper[4784]: I0106 09:36:59.498240 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerDied","Data":"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd"} Jan 06 09:37:00 crc kubenswrapper[4784]: I0106 09:37:00.511904 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerStarted","Data":"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c"} Jan 06 09:37:00 crc kubenswrapper[4784]: I0106 09:37:00.536847 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wdm9h" podStartSLOduration=3.073140652 podStartE2EDuration="6.536819549s" 
podCreationTimestamp="2026-01-06 09:36:54 +0000 UTC" firstStartedPulling="2026-01-06 09:36:56.469120109 +0000 UTC m=+4918.515292966" lastFinishedPulling="2026-01-06 09:36:59.932798986 +0000 UTC m=+4921.978971863" observedRunningTime="2026-01-06 09:37:00.530596886 +0000 UTC m=+4922.576769763" watchObservedRunningTime="2026-01-06 09:37:00.536819549 +0000 UTC m=+4922.582992426"
Jan 06 09:37:04 crc kubenswrapper[4784]: I0106 09:37:04.853263 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wdm9h"
Jan 06 09:37:04 crc kubenswrapper[4784]: I0106 09:37:04.853690 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wdm9h"
Jan 06 09:37:05 crc kubenswrapper[4784]: I0106 09:37:05.927351 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wdm9h" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="registry-server" probeResult="failure" output=<
Jan 06 09:37:05 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s
Jan 06 09:37:05 crc kubenswrapper[4784]: >
Jan 06 09:37:14 crc kubenswrapper[4784]: I0106 09:37:14.934402 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wdm9h"
Jan 06 09:37:15 crc kubenswrapper[4784]: I0106 09:37:15.022197 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wdm9h"
Jan 06 09:37:15 crc kubenswrapper[4784]: I0106 09:37:15.183237 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"]
Jan 06 09:37:16 crc kubenswrapper[4784]: I0106 09:37:16.655709 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wdm9h" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="registry-server" containerID="cri-o://0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c" gracePeriod=2
Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.134371 4784 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.166725 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content\") pod \"9c65632b-e212-4220-8621-18ec84fda50a\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.166877 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkbl7\" (UniqueName: \"kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7\") pod \"9c65632b-e212-4220-8621-18ec84fda50a\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.166965 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities\") pod \"9c65632b-e212-4220-8621-18ec84fda50a\" (UID: \"9c65632b-e212-4220-8621-18ec84fda50a\") " Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.169009 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities" (OuterVolumeSpecName: "utilities") pod "9c65632b-e212-4220-8621-18ec84fda50a" (UID: "9c65632b-e212-4220-8621-18ec84fda50a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.175218 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.176860 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7" (OuterVolumeSpecName: "kube-api-access-gkbl7") pod "9c65632b-e212-4220-8621-18ec84fda50a" (UID: "9c65632b-e212-4220-8621-18ec84fda50a"). InnerVolumeSpecName "kube-api-access-gkbl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.276833 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkbl7\" (UniqueName: \"kubernetes.io/projected/9c65632b-e212-4220-8621-18ec84fda50a-kube-api-access-gkbl7\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.333330 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c65632b-e212-4220-8621-18ec84fda50a" (UID: "9c65632b-e212-4220-8621-18ec84fda50a"). InnerVolumeSpecName "catalog-content". 
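
The startup-probe failure above for redhat-operators-wdm9h reports a connect timeout against the registry's gRPC port while the catalog is still loading ("failed to connect service \":50051\" within 1s"); the retry at 09:37:14 succeeds and the pod goes ready shortly before it is deleted. A bare-bones Go reachability check with the same one-second budget (a plain TCP dial as a stand-in for the real gRPC health probe):

package main

import (
	"fmt"
	"net"
	"time"
)

// probeTCP reports whether addr accepts a connection within timeout,
// standing in for the gRPC health probe used against :50051 above.
func probeTCP(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return err // e.g. a connect timeout while the registry is still loading
	}
	conn.Close()
	return nil
}

func main() {
	if err := probeTCP("127.0.0.1:50051", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
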
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.378776 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c65632b-e212-4220-8621-18ec84fda50a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.671629 4784 generic.go:334] "Generic (PLEG): container finished" podID="9c65632b-e212-4220-8621-18ec84fda50a" containerID="0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c" exitCode=0 Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.671707 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerDied","Data":"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c"} Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.671778 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdm9h" event={"ID":"9c65632b-e212-4220-8621-18ec84fda50a","Type":"ContainerDied","Data":"0008ba4160f758f61489c89a6393b8c63840fb417d6787f617db18279207e53f"} Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.671820 4784 scope.go:117] "RemoveContainer" containerID="0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.671727 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdm9h" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.717454 4784 scope.go:117] "RemoveContainer" containerID="e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.725035 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"] Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.735838 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wdm9h"] Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.749558 4784 scope.go:117] "RemoveContainer" containerID="59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.792941 4784 scope.go:117] "RemoveContainer" containerID="0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c" Jan 06 09:37:17 crc kubenswrapper[4784]: E0106 09:37:17.793642 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c\": container with ID starting with 0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c not found: ID does not exist" containerID="0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.793724 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c"} err="failed to get container status \"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c\": rpc error: code = NotFound desc = could not find container \"0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c\": container with ID starting with 0f10a967bbb0042dcccdb90f9c70c94242945fd5028aa6df0ac321c23d8cfa4c not found: ID does not exist" Jan 06 09:37:17 crc 
kubenswrapper[4784]: I0106 09:37:17.793768 4784 scope.go:117] "RemoveContainer" containerID="e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd" Jan 06 09:37:17 crc kubenswrapper[4784]: E0106 09:37:17.794355 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd\": container with ID starting with e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd not found: ID does not exist" containerID="e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.794418 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd"} err="failed to get container status \"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd\": rpc error: code = NotFound desc = could not find container \"e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd\": container with ID starting with e629aa662cbcb57c59763f55985006eceaf0d161e42391056a0a6fafb974e5bd not found: ID does not exist" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.794460 4784 scope.go:117] "RemoveContainer" containerID="59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f" Jan 06 09:37:17 crc kubenswrapper[4784]: E0106 09:37:17.794963 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f\": container with ID starting with 59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f not found: ID does not exist" containerID="59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f" Jan 06 09:37:17 crc kubenswrapper[4784]: I0106 09:37:17.795020 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f"} err="failed to get container status \"59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f\": rpc error: code = NotFound desc = could not find container \"59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f\": container with ID starting with 59bfa6926e7440d1b0fe110695ff34d07569456b45d819046eccbb74676cff9f not found: ID does not exist" Jan 06 09:37:18 crc kubenswrapper[4784]: I0106 09:37:18.328915 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c65632b-e212-4220-8621-18ec84fda50a" path="/var/lib/kubelet/pods/9c65632b-e212-4220-8621-18ec84fda50a/volumes" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.279028 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:25 crc kubenswrapper[4784]: E0106 09:37:25.280747 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="extract-content" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.280849 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="extract-content" Jan 06 09:37:25 crc kubenswrapper[4784]: E0106 09:37:25.280955 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="extract-utilities" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.281038 4784 
state_mem.go:107] "Deleted CPUSet assignment" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="extract-utilities" Jan 06 09:37:25 crc kubenswrapper[4784]: E0106 09:37:25.281128 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="registry-server" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.281206 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="registry-server" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.281460 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c65632b-e212-4220-8621-18ec84fda50a" containerName="registry-server" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.282754 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.286208 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.302143 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m5t8\" (UniqueName: \"kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.302348 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.302446 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.403951 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.404329 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.404523 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m5t8\" (UniqueName: \"kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc 
kubenswrapper[4784]: I0106 09:37:25.405119 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.405517 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.433906 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m5t8\" (UniqueName: \"kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8\") pod \"certified-operators-r45ld\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.607105 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:25 crc kubenswrapper[4784]: I0106 09:37:25.883098 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:25 crc kubenswrapper[4784]: W0106 09:37:25.894601 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ea4d4c6_ee69_4a1a_8036_bf098351ebc0.slice/crio-a8a4bdeca97640a5892abaafc8367d80c00d2778b97ecf0f0199a1d032ea5bde WatchSource:0}: Error finding container a8a4bdeca97640a5892abaafc8367d80c00d2778b97ecf0f0199a1d032ea5bde: Status 404 returned error can't find the container with id a8a4bdeca97640a5892abaafc8367d80c00d2778b97ecf0f0199a1d032ea5bde Jan 06 09:37:26 crc kubenswrapper[4784]: I0106 09:37:26.758579 4784 generic.go:334] "Generic (PLEG): container finished" podID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerID="fcd220138fd48c583f61e81d3dbc364e133a0a5e9e94362088fe6b5862c4aa7d" exitCode=0 Jan 06 09:37:26 crc kubenswrapper[4784]: I0106 09:37:26.759002 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerDied","Data":"fcd220138fd48c583f61e81d3dbc364e133a0a5e9e94362088fe6b5862c4aa7d"} Jan 06 09:37:26 crc kubenswrapper[4784]: I0106 09:37:26.759047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerStarted","Data":"a8a4bdeca97640a5892abaafc8367d80c00d2778b97ecf0f0199a1d032ea5bde"} Jan 06 09:37:27 crc kubenswrapper[4784]: I0106 09:37:27.773968 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerStarted","Data":"cf3856b8c56a7a4c4eb69e3b7f154b7b90254819ac221477dd287e8fa0b47373"} Jan 06 09:37:28 crc kubenswrapper[4784]: I0106 09:37:28.793503 4784 generic.go:334] "Generic (PLEG): container finished" podID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerID="cf3856b8c56a7a4c4eb69e3b7f154b7b90254819ac221477dd287e8fa0b47373" exitCode=0 Jan 06 09:37:28 crc 
kubenswrapper[4784]: I0106 09:37:28.793684 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerDied","Data":"cf3856b8c56a7a4c4eb69e3b7f154b7b90254819ac221477dd287e8fa0b47373"} Jan 06 09:37:29 crc kubenswrapper[4784]: I0106 09:37:29.804677 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerStarted","Data":"4cc641a864db65bd90be70ff3e60e42c2124a40c27e1e0162ca66973a4d59328"} Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.607592 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.608031 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.715880 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.741236 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r45ld" podStartSLOduration=8.293143352 podStartE2EDuration="10.741220279s" podCreationTimestamp="2026-01-06 09:37:25 +0000 UTC" firstStartedPulling="2026-01-06 09:37:26.761458651 +0000 UTC m=+4948.807631528" lastFinishedPulling="2026-01-06 09:37:29.209535608 +0000 UTC m=+4951.255708455" observedRunningTime="2026-01-06 09:37:29.828803585 +0000 UTC m=+4951.874976432" watchObservedRunningTime="2026-01-06 09:37:35.741220279 +0000 UTC m=+4957.787393116" Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.895761 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:35 crc kubenswrapper[4784]: I0106 09:37:35.959868 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:37 crc kubenswrapper[4784]: I0106 09:37:37.870913 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r45ld" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="registry-server" containerID="cri-o://4cc641a864db65bd90be70ff3e60e42c2124a40c27e1e0162ca66973a4d59328" gracePeriod=2 Jan 06 09:37:38 crc kubenswrapper[4784]: I0106 09:37:38.883509 4784 generic.go:334] "Generic (PLEG): container finished" podID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerID="4cc641a864db65bd90be70ff3e60e42c2124a40c27e1e0162ca66973a4d59328" exitCode=0 Jan 06 09:37:38 crc kubenswrapper[4784]: I0106 09:37:38.883603 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerDied","Data":"4cc641a864db65bd90be70ff3e60e42c2124a40c27e1e0162ca66973a4d59328"} Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.486446 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.603475 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m5t8\" (UniqueName: \"kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8\") pod \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.603538 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities\") pod \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.603732 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content\") pod \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\" (UID: \"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0\") " Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.605851 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities" (OuterVolumeSpecName: "utilities") pod "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" (UID: "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.611803 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8" (OuterVolumeSpecName: "kube-api-access-4m5t8") pod "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" (UID: "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0"). InnerVolumeSpecName "kube-api-access-4m5t8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.657330 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" (UID: "4ea4d4c6-ee69-4a1a-8036-bf098351ebc0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.705701 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.705757 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m5t8\" (UniqueName: \"kubernetes.io/projected/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-kube-api-access-4m5t8\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.705780 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.894359 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r45ld" event={"ID":"4ea4d4c6-ee69-4a1a-8036-bf098351ebc0","Type":"ContainerDied","Data":"a8a4bdeca97640a5892abaafc8367d80c00d2778b97ecf0f0199a1d032ea5bde"} Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.894432 4784 scope.go:117] "RemoveContainer" containerID="4cc641a864db65bd90be70ff3e60e42c2124a40c27e1e0162ca66973a4d59328" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.894479 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r45ld" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.920067 4784 scope.go:117] "RemoveContainer" containerID="cf3856b8c56a7a4c4eb69e3b7f154b7b90254819ac221477dd287e8fa0b47373" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.947067 4784 scope.go:117] "RemoveContainer" containerID="fcd220138fd48c583f61e81d3dbc364e133a0a5e9e94362088fe6b5862c4aa7d" Jan 06 09:37:39 crc kubenswrapper[4784]: I0106 09:37:39.986852 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:40 crc kubenswrapper[4784]: I0106 09:37:40.001803 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r45ld"] Jan 06 09:37:40 crc kubenswrapper[4784]: I0106 09:37:40.326508 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" path="/var/lib/kubelet/pods/4ea4d4c6-ee69-4a1a-8036-bf098351ebc0/volumes" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.890950 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:06 crc kubenswrapper[4784]: E0106 09:38:06.891605 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="registry-server" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.891617 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="registry-server" Jan 06 09:38:06 crc kubenswrapper[4784]: E0106 09:38:06.891627 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="extract-utilities" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.891634 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="extract-utilities" Jan 06 09:38:06 crc kubenswrapper[4784]: E0106 09:38:06.891660 4784 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="extract-content" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.891665 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="extract-content" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.891794 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea4d4c6-ee69-4a1a-8036-bf098351ebc0" containerName="registry-server" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.892415 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.899589 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.899857 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cnntp" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.900083 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.900247 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.915457 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.916515 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.919259 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.920337 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.962906 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.962986 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.963014 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgft6\" (UniqueName: \"kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.963043 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: 
\"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.963238 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbfg5\" (UniqueName: \"kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:06 crc kubenswrapper[4784]: I0106 09:38:06.964823 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.064494 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbfg5\" (UniqueName: \"kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.064580 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.064619 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.064660 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgft6\" (UniqueName: \"kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.064681 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.065415 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.065415 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.065957 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.085188 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgft6\" (UniqueName: \"kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6\") pod \"dnsmasq-dns-56bbd59dc5-ht65f\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.087245 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbfg5\" (UniqueName: \"kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5\") pod \"dnsmasq-dns-5986db9b4f-f7l22\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.216232 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.230914 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.312669 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.356120 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.357260 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.371383 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.371441 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9prv\" (UniqueName: \"kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.371503 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.374259 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.472919 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.473234 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9prv\" (UniqueName: \"kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.473288 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.473963 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.474060 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.490996 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9prv\" (UniqueName: 
\"kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv\") pod \"dnsmasq-dns-865d9b578f-llvj5\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.675875 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.693196 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.710097 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.711945 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.724737 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.776399 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.776700 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqz7f\" (UniqueName: \"kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.776854 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.776447 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.877462 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.877532 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqz7f\" (UniqueName: \"kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.877611 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " 
pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.878570 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.878589 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.882953 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:07 crc kubenswrapper[4784]: I0106 09:38:07.904845 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqz7f\" (UniqueName: \"kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f\") pod \"dnsmasq-dns-5d79f765b5-6d2wm\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.035063 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.164156 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" event={"ID":"cafed0b5-67b1-49dc-934a-096b892dce98","Type":"ContainerStarted","Data":"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5"} Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.164465 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" event={"ID":"cafed0b5-67b1-49dc-934a-096b892dce98","Type":"ContainerStarted","Data":"025cd097c79dfc14337e80eb8b129bf8d5177660e3422ab2089da14dc47f6cf7"} Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.164632 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" podUID="cafed0b5-67b1-49dc-934a-096b892dce98" containerName="init" containerID="cri-o://b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5" gracePeriod=10 Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.173960 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" event={"ID":"3425a948-7ac4-4b61-b82c-7e21c24ed09a","Type":"ContainerStarted","Data":"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852"} Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.174004 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" event={"ID":"3425a948-7ac4-4b61-b82c-7e21c24ed09a","Type":"ContainerStarted","Data":"e2cbd1234f355323654e097773520d51be6d63eb1a628bda97864fb75b269346"} Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.439219 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.481163 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.482760 4784 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.486653 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.486740 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.486656 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.486876 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.486980 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.487022 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.487106 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-smbkr" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.493439 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.563265 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.590871 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.590927 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.590947 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.590963 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591027 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591064 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591089 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv869\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591113 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591134 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591161 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.591379 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.594539 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.647380 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692483 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config\") pod \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692602 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgft6\" (UniqueName: \"kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6\") pod \"cafed0b5-67b1-49dc-934a-096b892dce98\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692673 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc\") pod \"cafed0b5-67b1-49dc-934a-096b892dce98\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692749 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config\") pod \"cafed0b5-67b1-49dc-934a-096b892dce98\" (UID: \"cafed0b5-67b1-49dc-934a-096b892dce98\") " Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692773 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbfg5\" (UniqueName: \"kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5\") pod \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\" (UID: \"3425a948-7ac4-4b61-b82c-7e21c24ed09a\") " Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.692955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693015 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693060 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693081 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693103 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693133 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693218 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693247 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv869\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693278 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693308 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693346 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.693862 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.696191 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.696280 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.696563 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.697432 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.697836 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.697865 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6" (OuterVolumeSpecName: "kube-api-access-fgft6") pod "cafed0b5-67b1-49dc-934a-096b892dce98" (UID: "cafed0b5-67b1-49dc-934a-096b892dce98"). InnerVolumeSpecName "kube-api-access-fgft6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.698902 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5" (OuterVolumeSpecName: "kube-api-access-gbfg5") pod "3425a948-7ac4-4b61-b82c-7e21c24ed09a" (UID: "3425a948-7ac4-4b61-b82c-7e21c24ed09a"). InnerVolumeSpecName "kube-api-access-gbfg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.699495 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.699521 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c174523447f4e00f60312996a8e4573c039f70780f76021176193e3f554aa553/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.700976 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.702044 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.707839 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.712886 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv869\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.713175 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config" (OuterVolumeSpecName: "config") pod "cafed0b5-67b1-49dc-934a-096b892dce98" (UID: "cafed0b5-67b1-49dc-934a-096b892dce98"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.716875 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config" (OuterVolumeSpecName: "config") pod "3425a948-7ac4-4b61-b82c-7e21c24ed09a" (UID: "3425a948-7ac4-4b61-b82c-7e21c24ed09a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.716887 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cafed0b5-67b1-49dc-934a-096b892dce98" (UID: "cafed0b5-67b1-49dc-934a-096b892dce98"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.728735 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.794191 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-config\") on node \"crc\" DevicePath \"\""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.794223 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbfg5\" (UniqueName: \"kubernetes.io/projected/3425a948-7ac4-4b61-b82c-7e21c24ed09a-kube-api-access-gbfg5\") on node \"crc\" DevicePath \"\""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.794233 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3425a948-7ac4-4b61-b82c-7e21c24ed09a-config\") on node \"crc\" DevicePath \"\""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.794245 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgft6\" (UniqueName: \"kubernetes.io/projected/cafed0b5-67b1-49dc-934a-096b892dce98-kube-api-access-fgft6\") on node \"crc\" DevicePath \"\""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.794254 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cafed0b5-67b1-49dc-934a-096b892dce98-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.841005 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 09:38:08 crc kubenswrapper[4784]: E0106 09:38:08.841275 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3425a948-7ac4-4b61-b82c-7e21c24ed09a" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.841287 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3425a948-7ac4-4b61-b82c-7e21c24ed09a" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: E0106 09:38:08.841299 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cafed0b5-67b1-49dc-934a-096b892dce98" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.841305 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="cafed0b5-67b1-49dc-934a-096b892dce98" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.841454 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="cafed0b5-67b1-49dc-934a-096b892dce98" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.841469 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3425a948-7ac4-4b61-b82c-7e21c24ed09a" containerName="init"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.842112 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.845176 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.845382 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.845529 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.845527 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zdktv"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.845910 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.846164 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.846245 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.859802 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.892776 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997254 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997336 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997473 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997516 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997600 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0"
Jan 06 09:38:08 crc kubenswrapper[4784]: I0106
09:38:08.997648 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbzwh\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997680 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997783 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997885 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997949 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:08 crc kubenswrapper[4784]: I0106 09:38:08.997994 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099255 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099326 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099438 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099480 4784 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099572 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099609 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099657 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099692 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.099724 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbzwh\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.100226 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.100632 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.103481 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.102855 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc 
kubenswrapper[4784]: I0106 09:38:09.101475 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.103508 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.104918 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.106140 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.108345 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.108573 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5f99e721f0b9676a57ee65d594f297905f01f38edfb31ea4d191271f234583dd/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.116438 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.119508 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbzwh\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.120507 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.132359 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.192743 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") " pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.196302 4784 generic.go:334] "Generic (PLEG): container finished" podID="3425a948-7ac4-4b61-b82c-7e21c24ed09a" containerID="c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852" exitCode=0 Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.196407 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" event={"ID":"3425a948-7ac4-4b61-b82c-7e21c24ed09a","Type":"ContainerDied","Data":"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.196480 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" event={"ID":"3425a948-7ac4-4b61-b82c-7e21c24ed09a","Type":"ContainerDied","Data":"e2cbd1234f355323654e097773520d51be6d63eb1a628bda97864fb75b269346"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.196502 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-f7l22" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.196505 4784 scope.go:117] "RemoveContainer" containerID="c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.203432 4784 generic.go:334] "Generic (PLEG): container finished" podID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerID="4eaa5aa22a13fdfcffa2198be8918fc6c736f1b7f0e3336ad90d393a283c5b14" exitCode=0 Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.203711 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" event={"ID":"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb","Type":"ContainerDied","Data":"4eaa5aa22a13fdfcffa2198be8918fc6c736f1b7f0e3336ad90d393a283c5b14"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.203750 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" event={"ID":"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb","Type":"ContainerStarted","Data":"f9e05351c8a95a94d70ae88f34abeb032f8cf4895a1c32ddfde7a3499357dfcf"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.206324 4784 generic.go:334] "Generic (PLEG): container finished" podID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerID="c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f" exitCode=0 Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.206430 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" event={"ID":"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5","Type":"ContainerDied","Data":"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.206474 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" 
event={"ID":"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5","Type":"ContainerStarted","Data":"2c60ea3ebefe51b1a177c5f5c9ebaa1dbbc37e530b7e0b47e068f2bd76b1a9b7"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.210370 4784 generic.go:334] "Generic (PLEG): container finished" podID="cafed0b5-67b1-49dc-934a-096b892dce98" containerID="b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5" exitCode=0 Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.210414 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" event={"ID":"cafed0b5-67b1-49dc-934a-096b892dce98","Type":"ContainerDied","Data":"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.210443 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" event={"ID":"cafed0b5-67b1-49dc-934a-096b892dce98","Type":"ContainerDied","Data":"025cd097c79dfc14337e80eb8b129bf8d5177660e3422ab2089da14dc47f6cf7"} Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.210497 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-ht65f" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.248486 4784 scope.go:117] "RemoveContainer" containerID="c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852" Jan 06 09:38:09 crc kubenswrapper[4784]: E0106 09:38:09.292002 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852\": container with ID starting with c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852 not found: ID does not exist" containerID="c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.292051 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852"} err="failed to get container status \"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852\": rpc error: code = NotFound desc = could not find container \"c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852\": container with ID starting with c19bcffc7373a4fdf01882d81ebd9e859eda345b5e5ab7f4a843a6f32178f852 not found: ID does not exist" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.292075 4784 scope.go:117] "RemoveContainer" containerID="b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.348346 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.351076 4784 scope.go:117] "RemoveContainer" containerID="b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5" Jan 06 09:38:09 crc kubenswrapper[4784]: E0106 09:38:09.355319 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5\": container with ID starting with b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5 not found: ID does not exist" containerID="b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.355362 4784 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5"} err="failed to get container status \"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5\": rpc error: code = NotFound desc = could not find container \"b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5\": container with ID starting with b75dc5d60112f2e5de60d3b1e9d862aaa25f44c31e360cdc2b74747b32cf25d5 not found: ID does not exist" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.357837 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-f7l22"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.374656 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.381393 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.382681 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.385431 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-xpghk" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.385840 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.385963 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.389387 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-ht65f"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.392248 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.392945 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.395194 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.494039 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510377 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kolla-config\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510440 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-default\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510461 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510491 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510514 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510576 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510593 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pljk\" (UniqueName: \"kubernetes.io/projected/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kube-api-access-5pljk\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.510620 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611402 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611455 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pljk\" (UniqueName: \"kubernetes.io/projected/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kube-api-access-5pljk\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611499 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611525 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kolla-config\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611593 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-default\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611622 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611660 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.611714 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.613534 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.614636 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kolla-config\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.614912 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-default\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.615136 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.620034 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.620074 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b30c08cff3d106437f825ae6ce559c82cca230fe61abf7db46aaaf230a72b92c/globalmount\"" pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.621169 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.621933 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.638969 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pljk\" (UniqueName: \"kubernetes.io/projected/693aaeed-5a0c-4230-a3e1-4b7b74a519cd-kube-api-access-5pljk\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.669923 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7ee85662-e315-4bbd-ad58-5035c13cc5f2\") pod \"openstack-galera-0\" (UID: \"693aaeed-5a0c-4230-a3e1-4b7b74a519cd\") " pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: E0106 09:38:09.678883 4784 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 06 09:38:09 crc kubenswrapper[4784]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 06 09:38:09 crc kubenswrapper[4784]: > podSandboxID="f9e05351c8a95a94d70ae88f34abeb032f8cf4895a1c32ddfde7a3499357dfcf" Jan 06 09:38:09 crc kubenswrapper[4784]: E0106 09:38:09.679040 
4784 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 06 09:38:09 crc kubenswrapper[4784]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb6hc5h68h68h594h659hdbh679h65ch5f6hdch6h5b9h8fh55hfhf8h57fhc7h56ch687h669h559h678h5dhc7hf7h697h5d6h9ch669h54fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g9prv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-865d9b578f-llvj5_openstack(3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 06 09:38:09 crc kubenswrapper[4784]: > logger="UnhandledError" Jan 06 09:38:09 crc kubenswrapper[4784]: E0106 09:38:09.685903 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" 
podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.699010 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:38:09 crc kubenswrapper[4784]: W0106 09:38:09.702863 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59a80e93_fe77_4c67_a4b7_d1682f1bcf14.slice/crio-38c33824959eab29b47a4543f5ab4ebb6af22db923eb6eb1860cba52b4656319 WatchSource:0}: Error finding container 38c33824959eab29b47a4543f5ab4ebb6af22db923eb6eb1860cba52b4656319: Status 404 returned error can't find the container with id 38c33824959eab29b47a4543f5ab4ebb6af22db923eb6eb1860cba52b4656319 Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.719668 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 06 09:38:09 crc kubenswrapper[4784]: I0106 09:38:09.967986 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.223120 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerStarted","Data":"38c33824959eab29b47a4543f5ab4ebb6af22db923eb6eb1860cba52b4656319"} Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.228559 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" event={"ID":"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5","Type":"ContainerStarted","Data":"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574"} Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.228772 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.231045 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerStarted","Data":"e2285a4c2dd3701cd4d4ad1ea42d32a43b9c09ed69d97a5cb39733682bef7d9c"} Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.233615 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 06 09:38:10 crc kubenswrapper[4784]: W0106 09:38:10.243183 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod693aaeed_5a0c_4230_a3e1_4b7b74a519cd.slice/crio-b4a079cd43044d43a1703da9232a400be4cc453b47eae5ce31e2674437de93f6 WatchSource:0}: Error finding container b4a079cd43044d43a1703da9232a400be4cc453b47eae5ce31e2674437de93f6: Status 404 returned error can't find the container with id b4a079cd43044d43a1703da9232a400be4cc453b47eae5ce31e2674437de93f6 Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.288461 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" podStartSLOduration=3.288446046 podStartE2EDuration="3.288446046s" podCreationTimestamp="2026-01-06 09:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:10.262354717 +0000 UTC m=+4992.308527594" watchObservedRunningTime="2026-01-06 09:38:10.288446046 +0000 UTC m=+4992.334618883" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.338602 4784 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="3425a948-7ac4-4b61-b82c-7e21c24ed09a" path="/var/lib/kubelet/pods/3425a948-7ac4-4b61-b82c-7e21c24ed09a/volumes" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.339204 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cafed0b5-67b1-49dc-934a-096b892dce98" path="/var/lib/kubelet/pods/cafed0b5-67b1-49dc-934a-096b892dce98/volumes" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.873192 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.877422 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.882383 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.883235 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-w88gk" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.891942 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.898100 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 06 09:38:10 crc kubenswrapper[4784]: I0106 09:38:10.940961 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.033504 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkpjp\" (UniqueName: \"kubernetes.io/projected/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kube-api-access-pkpjp\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.033674 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.033798 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.033890 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.034200 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-generated\") pod 
\"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.034269 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.034366 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.034412 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.135960 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136284 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136380 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136462 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136562 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkpjp\" (UniqueName: \"kubernetes.io/projected/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kube-api-access-pkpjp\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136646 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kolla-config\") pod 
\"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136734 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.136818 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.137301 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.138329 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.138453 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.139189 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.142315 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.143762 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.144498 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.144581 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bfc379fa733f3a41181e3a6afe03b6b3f75ae891e1b645e148e4ef8721abf603/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.161756 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkpjp\" (UniqueName: \"kubernetes.io/projected/0c62e4c3-2b3e-49bf-940c-0bfb4b23f066-kube-api-access-pkpjp\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.168953 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f44b63b-5d9c-4123-ac70-12066b38826f\") pod \"openstack-cell1-galera-0\" (UID: \"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066\") " pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.221182 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.241374 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerStarted","Data":"07b3e4b3a44be2862df885dade5439e104fed043c5dc46a390eaae7529928b3e"} Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.243605 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" event={"ID":"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb","Type":"ContainerStarted","Data":"8c7c4b3cc698340d3b464ef0546fd7a519a244b403baad7d77afd9ce7b0cc788"} Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.244272 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.247000 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"693aaeed-5a0c-4230-a3e1-4b7b74a519cd","Type":"ContainerStarted","Data":"2222aececd47f277bb7334924e5fad98585e3f9d568947a6b4ed6cb8c6b2173b"} Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.247030 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"693aaeed-5a0c-4230-a3e1-4b7b74a519cd","Type":"ContainerStarted","Data":"b4a079cd43044d43a1703da9232a400be4cc453b47eae5ce31e2674437de93f6"} Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.297586 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" podStartSLOduration=4.297568164 podStartE2EDuration="4.297568164s" podCreationTimestamp="2026-01-06 09:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:11.296750409 +0000 UTC m=+4993.342923256" watchObservedRunningTime="2026-01-06 09:38:11.297568164 +0000 UTC 
m=+4993.343740991" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.426094 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.426988 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.435307 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-7tf2p" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.435344 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.435476 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.460328 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.543365 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.543406 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.543435 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kolla-config\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.543484 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-config-data\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.543526 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9lw4\" (UniqueName: \"kubernetes.io/projected/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kube-api-access-g9lw4\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.645222 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-config-data\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.645330 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9lw4\" (UniqueName: \"kubernetes.io/projected/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kube-api-access-g9lw4\") pod \"memcached-0\" (UID: 
\"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.645399 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.645426 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.645458 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kolla-config\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.646085 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-config-data\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.646168 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kolla-config\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.649667 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.651804 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7230cd6-8400-4e6f-b4fd-a086e38629b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.662906 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9lw4\" (UniqueName: \"kubernetes.io/projected/c7230cd6-8400-4e6f-b4fd-a086e38629b3-kube-api-access-g9lw4\") pod \"memcached-0\" (UID: \"c7230cd6-8400-4e6f-b4fd-a086e38629b3\") " pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.749679 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 06 09:38:11 crc kubenswrapper[4784]: I0106 09:38:11.757338 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 06 09:38:11 crc kubenswrapper[4784]: W0106 09:38:11.773660 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c62e4c3_2b3e_49bf_940c_0bfb4b23f066.slice/crio-de5e92279108e32ea92de032c695e33fafe618d5b01d052b42135a7ab94fbd41 WatchSource:0}: Error finding container de5e92279108e32ea92de032c695e33fafe618d5b01d052b42135a7ab94fbd41: Status 404 returned error can't find the container with id de5e92279108e32ea92de032c695e33fafe618d5b01d052b42135a7ab94fbd41 Jan 06 09:38:12 crc kubenswrapper[4784]: I0106 09:38:12.245932 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 06 09:38:12 crc kubenswrapper[4784]: W0106 09:38:12.256828 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7230cd6_8400_4e6f_b4fd_a086e38629b3.slice/crio-143e692103d280151987e3b6f082490eb1ff295164d768985d94f1350c8feb96 WatchSource:0}: Error finding container 143e692103d280151987e3b6f082490eb1ff295164d768985d94f1350c8feb96: Status 404 returned error can't find the container with id 143e692103d280151987e3b6f082490eb1ff295164d768985d94f1350c8feb96 Jan 06 09:38:12 crc kubenswrapper[4784]: I0106 09:38:12.265262 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066","Type":"ContainerStarted","Data":"cba61d129d197e8ae6a24573986a059bb67571e00514786fa74d2f9649082e3c"} Jan 06 09:38:12 crc kubenswrapper[4784]: I0106 09:38:12.265329 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066","Type":"ContainerStarted","Data":"de5e92279108e32ea92de032c695e33fafe618d5b01d052b42135a7ab94fbd41"} Jan 06 09:38:12 crc kubenswrapper[4784]: I0106 09:38:12.267833 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerStarted","Data":"9faffe6656064c65960493c3f53f77da1d09e667fc586a873ef4951222b51973"} Jan 06 09:38:13 crc kubenswrapper[4784]: I0106 09:38:13.280415 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c7230cd6-8400-4e6f-b4fd-a086e38629b3","Type":"ContainerStarted","Data":"cccc271b4efb7741f25ad57c44987c1415912bb1aab91e20eaeddde68e247f17"} Jan 06 09:38:13 crc kubenswrapper[4784]: I0106 09:38:13.280481 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c7230cd6-8400-4e6f-b4fd-a086e38629b3","Type":"ContainerStarted","Data":"143e692103d280151987e3b6f082490eb1ff295164d768985d94f1350c8feb96"} Jan 06 09:38:14 crc kubenswrapper[4784]: I0106 09:38:14.291479 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 06 09:38:15 crc kubenswrapper[4784]: I0106 09:38:15.304965 4784 generic.go:334] "Generic (PLEG): container finished" podID="693aaeed-5a0c-4230-a3e1-4b7b74a519cd" containerID="2222aececd47f277bb7334924e5fad98585e3f9d568947a6b4ed6cb8c6b2173b" exitCode=0 Jan 06 09:38:15 crc kubenswrapper[4784]: I0106 09:38:15.305088 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"693aaeed-5a0c-4230-a3e1-4b7b74a519cd","Type":"ContainerDied","Data":"2222aececd47f277bb7334924e5fad98585e3f9d568947a6b4ed6cb8c6b2173b"} Jan 06 09:38:15 crc kubenswrapper[4784]: I0106 09:38:15.361843 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=4.361810777 podStartE2EDuration="4.361810777s" podCreationTimestamp="2026-01-06 09:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:13.310740272 +0000 UTC m=+4995.356913149" watchObservedRunningTime="2026-01-06 09:38:15.361810777 +0000 UTC m=+4997.407983654" Jan 06 09:38:16 crc kubenswrapper[4784]: I0106 09:38:16.319948 4784 generic.go:334] "Generic (PLEG): container finished" podID="0c62e4c3-2b3e-49bf-940c-0bfb4b23f066" containerID="cba61d129d197e8ae6a24573986a059bb67571e00514786fa74d2f9649082e3c" exitCode=0 Jan 06 09:38:16 crc kubenswrapper[4784]: I0106 09:38:16.329255 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066","Type":"ContainerDied","Data":"cba61d129d197e8ae6a24573986a059bb67571e00514786fa74d2f9649082e3c"} Jan 06 09:38:16 crc kubenswrapper[4784]: I0106 09:38:16.329333 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"693aaeed-5a0c-4230-a3e1-4b7b74a519cd","Type":"ContainerStarted","Data":"cc2be2b11f91ab718d5bdc56aae488ffc62ea5a5769c9f5fbab22c028e0d0085"} Jan 06 09:38:16 crc kubenswrapper[4784]: I0106 09:38:16.368113 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.368096647 podStartE2EDuration="8.368096647s" podCreationTimestamp="2026-01-06 09:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:16.357083635 +0000 UTC m=+4998.403256522" watchObservedRunningTime="2026-01-06 09:38:16.368096647 +0000 UTC m=+4998.414269474" Jan 06 09:38:17 crc kubenswrapper[4784]: I0106 09:38:17.337215 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0c62e4c3-2b3e-49bf-940c-0bfb4b23f066","Type":"ContainerStarted","Data":"d2507159260c73087345ed2957c6962f538be534f987188e58a1e08fe6207ac8"} Jan 06 09:38:17 crc kubenswrapper[4784]: I0106 09:38:17.381095 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.381073464 podStartE2EDuration="8.381073464s" podCreationTimestamp="2026-01-06 09:38:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:17.369081723 +0000 UTC m=+4999.415254570" watchObservedRunningTime="2026-01-06 09:38:17.381073464 +0000 UTC m=+4999.427246311" Jan 06 09:38:17 crc kubenswrapper[4784]: I0106 09:38:17.696086 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:18 crc kubenswrapper[4784]: I0106 09:38:18.037809 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:38:18 crc kubenswrapper[4784]: I0106 09:38:18.102699 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:18 
crc kubenswrapper[4784]: I0106 09:38:18.343709 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="dnsmasq-dns" containerID="cri-o://8c7c4b3cc698340d3b464ef0546fd7a519a244b403baad7d77afd9ce7b0cc788" gracePeriod=10 Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.352474 4784 generic.go:334] "Generic (PLEG): container finished" podID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerID="8c7c4b3cc698340d3b464ef0546fd7a519a244b403baad7d77afd9ce7b0cc788" exitCode=0 Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.352593 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" event={"ID":"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb","Type":"ContainerDied","Data":"8c7c4b3cc698340d3b464ef0546fd7a519a244b403baad7d77afd9ce7b0cc788"} Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.352945 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" event={"ID":"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb","Type":"ContainerDied","Data":"f9e05351c8a95a94d70ae88f34abeb032f8cf4895a1c32ddfde7a3499357dfcf"} Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.352972 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9e05351c8a95a94d70ae88f34abeb032f8cf4895a1c32ddfde7a3499357dfcf" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.382216 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.497583 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config\") pod \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.497654 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc\") pod \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.497739 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9prv\" (UniqueName: \"kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv\") pod \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\" (UID: \"3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb\") " Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.510963 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv" (OuterVolumeSpecName: "kube-api-access-g9prv") pod "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" (UID: "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb"). InnerVolumeSpecName "kube-api-access-g9prv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.555218 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" (UID: "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.556071 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config" (OuterVolumeSpecName: "config") pod "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" (UID: "3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.599257 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-config\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.599296 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.599309 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9prv\" (UniqueName: \"kubernetes.io/projected/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb-kube-api-access-g9prv\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.720594 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 06 09:38:19 crc kubenswrapper[4784]: I0106 09:38:19.720911 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 06 09:38:20 crc kubenswrapper[4784]: I0106 09:38:20.361956 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865d9b578f-llvj5" Jan 06 09:38:20 crc kubenswrapper[4784]: I0106 09:38:20.394937 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:20 crc kubenswrapper[4784]: I0106 09:38:20.406426 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865d9b578f-llvj5"] Jan 06 09:38:20 crc kubenswrapper[4784]: I0106 09:38:20.466913 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 06 09:38:21 crc kubenswrapper[4784]: I0106 09:38:21.222138 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:21 crc kubenswrapper[4784]: I0106 09:38:21.222178 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:21 crc kubenswrapper[4784]: I0106 09:38:21.500627 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 06 09:38:21 crc kubenswrapper[4784]: I0106 09:38:21.751586 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 06 09:38:22 crc kubenswrapper[4784]: I0106 09:38:22.333307 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" path="/var/lib/kubelet/pods/3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb/volumes" Jan 06 09:38:23 crc kubenswrapper[4784]: I0106 09:38:23.811017 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:23 crc kubenswrapper[4784]: I0106 09:38:23.935501 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/openstack-cell1-galera-0" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.387150 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-gh24p"] Jan 06 09:38:28 crc kubenswrapper[4784]: E0106 09:38:28.388294 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="init" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.388317 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="init" Jan 06 09:38:28 crc kubenswrapper[4784]: E0106 09:38:28.388334 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="dnsmasq-dns" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.388347 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="dnsmasq-dns" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.388696 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9aa2d1-517e-42e7-849f-ecb9fef2b4cb" containerName="dnsmasq-dns" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.389428 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.392871 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.403287 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-gh24p"] Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.471479 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.471828 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc768\" (UniqueName: \"kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.574140 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.574341 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc768\" (UniqueName: \"kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.576037 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.606833 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc768\" (UniqueName: \"kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768\") pod \"root-account-create-update-gh24p\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:28 crc kubenswrapper[4784]: I0106 09:38:28.719735 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:29 crc kubenswrapper[4784]: I0106 09:38:29.065100 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-gh24p"] Jan 06 09:38:29 crc kubenswrapper[4784]: I0106 09:38:29.447087 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gh24p" event={"ID":"4999db7a-8b91-485b-972e-991db2fc1df3","Type":"ContainerStarted","Data":"90e4cec34758895a85b624729094bb131c08429a4c1d73b5981933e599258e20"} Jan 06 09:38:29 crc kubenswrapper[4784]: I0106 09:38:29.447149 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gh24p" event={"ID":"4999db7a-8b91-485b-972e-991db2fc1df3","Type":"ContainerStarted","Data":"0d4ee6af34abef487529f8231bafd9cc901a48c8fd20c7e4e9927cba2bc6be6e"} Jan 06 09:38:29 crc kubenswrapper[4784]: I0106 09:38:29.467632 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-gh24p" podStartSLOduration=1.467610101 podStartE2EDuration="1.467610101s" podCreationTimestamp="2026-01-06 09:38:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:29.46144795 +0000 UTC m=+5011.507620807" watchObservedRunningTime="2026-01-06 09:38:29.467610101 +0000 UTC m=+5011.513782948" Jan 06 09:38:30 crc kubenswrapper[4784]: I0106 09:38:30.461756 4784 generic.go:334] "Generic (PLEG): container finished" podID="4999db7a-8b91-485b-972e-991db2fc1df3" containerID="90e4cec34758895a85b624729094bb131c08429a4c1d73b5981933e599258e20" exitCode=0 Jan 06 09:38:30 crc kubenswrapper[4784]: I0106 09:38:30.462165 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gh24p" event={"ID":"4999db7a-8b91-485b-972e-991db2fc1df3","Type":"ContainerDied","Data":"90e4cec34758895a85b624729094bb131c08429a4c1d73b5981933e599258e20"} Jan 06 09:38:31 crc kubenswrapper[4784]: I0106 09:38:31.856808 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:31 crc kubenswrapper[4784]: I0106 09:38:31.956215 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc768\" (UniqueName: \"kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768\") pod \"4999db7a-8b91-485b-972e-991db2fc1df3\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " Jan 06 09:38:31 crc kubenswrapper[4784]: I0106 09:38:31.956301 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts\") pod \"4999db7a-8b91-485b-972e-991db2fc1df3\" (UID: \"4999db7a-8b91-485b-972e-991db2fc1df3\") " Jan 06 09:38:31 crc kubenswrapper[4784]: I0106 09:38:31.957367 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4999db7a-8b91-485b-972e-991db2fc1df3" (UID: "4999db7a-8b91-485b-972e-991db2fc1df3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:38:31 crc kubenswrapper[4784]: I0106 09:38:31.964945 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768" (OuterVolumeSpecName: "kube-api-access-gc768") pod "4999db7a-8b91-485b-972e-991db2fc1df3" (UID: "4999db7a-8b91-485b-972e-991db2fc1df3"). InnerVolumeSpecName "kube-api-access-gc768". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:38:32 crc kubenswrapper[4784]: I0106 09:38:32.058564 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc768\" (UniqueName: \"kubernetes.io/projected/4999db7a-8b91-485b-972e-991db2fc1df3-kube-api-access-gc768\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:32 crc kubenswrapper[4784]: I0106 09:38:32.059050 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4999db7a-8b91-485b-972e-991db2fc1df3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:32 crc kubenswrapper[4784]: I0106 09:38:32.478481 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gh24p" event={"ID":"4999db7a-8b91-485b-972e-991db2fc1df3","Type":"ContainerDied","Data":"0d4ee6af34abef487529f8231bafd9cc901a48c8fd20c7e4e9927cba2bc6be6e"} Jan 06 09:38:32 crc kubenswrapper[4784]: I0106 09:38:32.478528 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-gh24p" Jan 06 09:38:32 crc kubenswrapper[4784]: I0106 09:38:32.478532 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d4ee6af34abef487529f8231bafd9cc901a48c8fd20c7e4e9927cba2bc6be6e" Jan 06 09:38:34 crc kubenswrapper[4784]: I0106 09:38:34.862651 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-gh24p"] Jan 06 09:38:34 crc kubenswrapper[4784]: I0106 09:38:34.870255 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-gh24p"] Jan 06 09:38:36 crc kubenswrapper[4784]: I0106 09:38:36.332997 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4999db7a-8b91-485b-972e-991db2fc1df3" path="/var/lib/kubelet/pods/4999db7a-8b91-485b-972e-991db2fc1df3/volumes" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.874458 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-cr2vz"] Jan 06 09:38:39 crc kubenswrapper[4784]: E0106 09:38:39.875269 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4999db7a-8b91-485b-972e-991db2fc1df3" containerName="mariadb-account-create-update" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.875292 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="4999db7a-8b91-485b-972e-991db2fc1df3" containerName="mariadb-account-create-update" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.875682 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="4999db7a-8b91-485b-972e-991db2fc1df3" containerName="mariadb-account-create-update" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.876646 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.879159 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 06 09:38:39 crc kubenswrapper[4784]: I0106 09:38:39.889644 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cr2vz"] Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.010892 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gr7l\" (UniqueName: \"kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.011326 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.112447 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gr7l\" (UniqueName: \"kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.112707 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.114489 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.146048 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gr7l\" (UniqueName: \"kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l\") pod \"root-account-create-update-cr2vz\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.210181 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:40 crc kubenswrapper[4784]: I0106 09:38:40.713129 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cr2vz"] Jan 06 09:38:40 crc kubenswrapper[4784]: W0106 09:38:40.716943 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31150519_a40e_4c55_8b6d_a28ca67c6ff8.slice/crio-cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d WatchSource:0}: Error finding container cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d: Status 404 returned error can't find the container with id cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d Jan 06 09:38:41 crc kubenswrapper[4784]: I0106 09:38:41.567260 4784 generic.go:334] "Generic (PLEG): container finished" podID="31150519-a40e-4c55-8b6d-a28ca67c6ff8" containerID="351cb6bf5de0e2f11e590ff4da3f121ca080e6b057765fd36e151b7da752691a" exitCode=0 Jan 06 09:38:41 crc kubenswrapper[4784]: I0106 09:38:41.567344 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cr2vz" event={"ID":"31150519-a40e-4c55-8b6d-a28ca67c6ff8","Type":"ContainerDied","Data":"351cb6bf5de0e2f11e590ff4da3f121ca080e6b057765fd36e151b7da752691a"} Jan 06 09:38:41 crc kubenswrapper[4784]: I0106 09:38:41.567419 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cr2vz" event={"ID":"31150519-a40e-4c55-8b6d-a28ca67c6ff8","Type":"ContainerStarted","Data":"cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d"} Jan 06 09:38:42 crc kubenswrapper[4784]: I0106 09:38:42.944977 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.065643 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gr7l\" (UniqueName: \"kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l\") pod \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.065810 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts\") pod \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\" (UID: \"31150519-a40e-4c55-8b6d-a28ca67c6ff8\") " Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.067114 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "31150519-a40e-4c55-8b6d-a28ca67c6ff8" (UID: "31150519-a40e-4c55-8b6d-a28ca67c6ff8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.071902 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l" (OuterVolumeSpecName: "kube-api-access-8gr7l") pod "31150519-a40e-4c55-8b6d-a28ca67c6ff8" (UID: "31150519-a40e-4c55-8b6d-a28ca67c6ff8"). InnerVolumeSpecName "kube-api-access-8gr7l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.167213 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gr7l\" (UniqueName: \"kubernetes.io/projected/31150519-a40e-4c55-8b6d-a28ca67c6ff8-kube-api-access-8gr7l\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.167256 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31150519-a40e-4c55-8b6d-a28ca67c6ff8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.588698 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cr2vz" event={"ID":"31150519-a40e-4c55-8b6d-a28ca67c6ff8","Type":"ContainerDied","Data":"cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d"} Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.588752 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cdaed3bd751eac839330364914870f94beb8f051bdeebcd41bc9dd2ae161ad7d" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.588814 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cr2vz" Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.595176 4784 generic.go:334] "Generic (PLEG): container finished" podID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerID="07b3e4b3a44be2862df885dade5439e104fed043c5dc46a390eaae7529928b3e" exitCode=0 Jan 06 09:38:43 crc kubenswrapper[4784]: I0106 09:38:43.595222 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerDied","Data":"07b3e4b3a44be2862df885dade5439e104fed043c5dc46a390eaae7529928b3e"} Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.351247 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.351738 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.606615 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerStarted","Data":"33bfaa869e2881ea899df11d620eedbfb770f1b6d1ec10d66e2a2342eddacc19"} Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.607134 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.608811 4784 generic.go:334] "Generic (PLEG): container finished" podID="932c7932-8fbb-4943-b833-8481ed70de32" containerID="9faffe6656064c65960493c3f53f77da1d09e667fc586a873ef4951222b51973" exitCode=0 Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.608846 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerDied","Data":"9faffe6656064c65960493c3f53f77da1d09e667fc586a873ef4951222b51973"} Jan 06 09:38:44 crc kubenswrapper[4784]: I0106 09:38:44.659244 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.659216512 podStartE2EDuration="37.659216512s" podCreationTimestamp="2026-01-06 09:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:44.644589328 +0000 UTC m=+5026.690762255" watchObservedRunningTime="2026-01-06 09:38:44.659216512 +0000 UTC m=+5026.705389379" Jan 06 09:38:45 crc kubenswrapper[4784]: I0106 09:38:45.624644 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerStarted","Data":"7e3a9232258adb8f1d116df7993e5046263992356b26f0b7e56fbee5aec43eee"} Jan 06 09:38:45 crc kubenswrapper[4784]: I0106 09:38:45.625018 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 06 09:38:45 crc kubenswrapper[4784]: I0106 09:38:45.661970 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.661943151 podStartE2EDuration="38.661943151s" podCreationTimestamp="2026-01-06 09:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:38:45.656226853 +0000 UTC m=+5027.702399710" watchObservedRunningTime="2026-01-06 09:38:45.661943151 +0000 UTC m=+5027.708116018" Jan 06 09:38:58 crc kubenswrapper[4784]: I0106 09:38:58.897832 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:38:59 crc kubenswrapper[4784]: I0106 09:38:59.498793 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.631640 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:39:05 crc kubenswrapper[4784]: E0106 09:39:05.632957 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31150519-a40e-4c55-8b6d-a28ca67c6ff8" containerName="mariadb-account-create-update" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.632988 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="31150519-a40e-4c55-8b6d-a28ca67c6ff8" containerName="mariadb-account-create-update" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.633285 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="31150519-a40e-4c55-8b6d-a28ca67c6ff8" containerName="mariadb-account-create-update" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.636838 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.647770 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.738676 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.738998 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc7l2\" (UniqueName: \"kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.739043 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.840514 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.840619 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc7l2\" (UniqueName: \"kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.840671 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.841826 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.842502 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.874748 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc7l2\" (UniqueName: 
\"kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2\") pod \"dnsmasq-dns-699964fbc-tfnwh\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:05 crc kubenswrapper[4784]: I0106 09:39:05.960671 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:06 crc kubenswrapper[4784]: I0106 09:39:06.273462 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 09:39:06 crc kubenswrapper[4784]: I0106 09:39:06.593941 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:39:06 crc kubenswrapper[4784]: I0106 09:39:06.806409 4784 generic.go:334] "Generic (PLEG): container finished" podID="584dcda2-5983-4606-a054-b09385d52977" containerID="d6ef3442cba20c4f18d132e55fc8ac02ddd72e01ac6c20a75bb2c835a6bb0b4d" exitCode=0 Jan 06 09:39:06 crc kubenswrapper[4784]: I0106 09:39:06.806506 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" event={"ID":"584dcda2-5983-4606-a054-b09385d52977","Type":"ContainerDied","Data":"d6ef3442cba20c4f18d132e55fc8ac02ddd72e01ac6c20a75bb2c835a6bb0b4d"} Jan 06 09:39:06 crc kubenswrapper[4784]: I0106 09:39:06.806688 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" event={"ID":"584dcda2-5983-4606-a054-b09385d52977","Type":"ContainerStarted","Data":"385d85d750b9e8ab6f03be97f23ef4d2563160190e7651a331b7eda54058b657"} Jan 06 09:39:07 crc kubenswrapper[4784]: I0106 09:39:07.274996 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:07 crc kubenswrapper[4784]: I0106 09:39:07.816992 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" event={"ID":"584dcda2-5983-4606-a054-b09385d52977","Type":"ContainerStarted","Data":"5a888016b08cc799974df137e01621e2fa6bd011199939cd5d205596e1f4dfdc"} Jan 06 09:39:07 crc kubenswrapper[4784]: I0106 09:39:07.817423 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:07 crc kubenswrapper[4784]: I0106 09:39:07.839699 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" podStartSLOduration=2.8396785380000003 podStartE2EDuration="2.839678538s" podCreationTimestamp="2026-01-06 09:39:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:39:07.834849088 +0000 UTC m=+5049.881021925" watchObservedRunningTime="2026-01-06 09:39:07.839678538 +0000 UTC m=+5049.885851375" Jan 06 09:39:10 crc kubenswrapper[4784]: I0106 09:39:10.516724 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="rabbitmq" containerID="cri-o://7e3a9232258adb8f1d116df7993e5046263992356b26f0b7e56fbee5aec43eee" gracePeriod=604796 Jan 06 09:39:11 crc kubenswrapper[4784]: I0106 09:39:11.527622 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="rabbitmq" containerID="cri-o://33bfaa869e2881ea899df11d620eedbfb770f1b6d1ec10d66e2a2342eddacc19" gracePeriod=604796 Jan 06 09:39:14 crc 
kubenswrapper[4784]: I0106 09:39:14.350697 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:39:14 crc kubenswrapper[4784]: I0106 09:39:14.351130 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:39:15 crc kubenswrapper[4784]: I0106 09:39:15.962789 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.057838 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.058240 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="dnsmasq-dns" containerID="cri-o://714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574" gracePeriod=10 Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.511889 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.629809 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc\") pod \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.629947 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config\") pod \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.630024 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqz7f\" (UniqueName: \"kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f\") pod \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\" (UID: \"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5\") " Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.634790 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f" (OuterVolumeSpecName: "kube-api-access-rqz7f") pod "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" (UID: "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5"). InnerVolumeSpecName "kube-api-access-rqz7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.667872 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config" (OuterVolumeSpecName: "config") pod "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" (UID: "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.670501 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" (UID: "0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.732387 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.732436 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-config\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.732457 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqz7f\" (UniqueName: \"kubernetes.io/projected/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5-kube-api-access-rqz7f\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.896174 4784 generic.go:334] "Generic (PLEG): container finished" podID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerID="714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574" exitCode=0 Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.896249 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" event={"ID":"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5","Type":"ContainerDied","Data":"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574"} Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.896328 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" event={"ID":"0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5","Type":"ContainerDied","Data":"2c60ea3ebefe51b1a177c5f5c9ebaa1dbbc37e530b7e0b47e068f2bd76b1a9b7"} Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.896335 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-6d2wm" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.896362 4784 scope.go:117] "RemoveContainer" containerID="714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.901516 4784 generic.go:334] "Generic (PLEG): container finished" podID="932c7932-8fbb-4943-b833-8481ed70de32" containerID="7e3a9232258adb8f1d116df7993e5046263992356b26f0b7e56fbee5aec43eee" exitCode=0 Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.901594 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerDied","Data":"7e3a9232258adb8f1d116df7993e5046263992356b26f0b7e56fbee5aec43eee"} Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.927422 4784 scope.go:117] "RemoveContainer" containerID="c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.954024 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.963255 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-6d2wm"] Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.970973 4784 scope.go:117] "RemoveContainer" containerID="714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574" Jan 06 09:39:16 crc kubenswrapper[4784]: E0106 09:39:16.971735 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574\": container with ID starting with 714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574 not found: ID does not exist" containerID="714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.971861 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574"} err="failed to get container status \"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574\": rpc error: code = NotFound desc = could not find container \"714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574\": container with ID starting with 714bf7868ef7fbe307a4fafb054968085883985e497c4dd05aa1bdd0bbd98574 not found: ID does not exist" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.972460 4784 scope.go:117] "RemoveContainer" containerID="c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f" Jan 06 09:39:16 crc kubenswrapper[4784]: E0106 09:39:16.973399 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f\": container with ID starting with c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f not found: ID does not exist" containerID="c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f" Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.973432 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f"} err="failed to get container status \"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f\": rpc error: code = 
Jan 06 09:39:16 crc kubenswrapper[4784]: I0106 09:39:16.973432 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f"} err="failed to get container status \"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f\": rpc error: code = NotFound desc = could not find container \"c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f\": container with ID starting with c7013298326f062d2f068d2c8462b9f760ba2826eeef10805821c060c642e73f not found: ID does not exist"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.091601 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242091 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242150 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242188 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbzwh\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242214 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242427 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242460 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242491 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242483 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242510 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242530 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242573 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242621 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data\") pod \"932c7932-8fbb-4943-b833-8481ed70de32\" (UID: \"932c7932-8fbb-4943-b833-8481ed70de32\") "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.242914 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.243477 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.244039 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.246958 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.247683 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info" (OuterVolumeSpecName: "pod-info") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.248518 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.251720 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh" (OuterVolumeSpecName: "kube-api-access-rbzwh") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "kube-api-access-rbzwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.262450 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e" (OuterVolumeSpecName: "persistence") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "pvc-a2b79ab4-175f-4949-badc-67d63d98703e". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.264214 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data" (OuterVolumeSpecName: "config-data") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.305339 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf" (OuterVolumeSpecName: "server-conf") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.338475 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "932c7932-8fbb-4943-b833-8481ed70de32" (UID: "932c7932-8fbb-4943-b833-8481ed70de32"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344401 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344433 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbzwh\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-kube-api-access-rbzwh\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344450 4784 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/932c7932-8fbb-4943-b833-8481ed70de32-pod-info\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344502 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") on node \"crc\" "
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344524 4784 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344559 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344577 4784 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/932c7932-8fbb-4943-b833-8481ed70de32-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344593 4784 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-server-conf\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344609 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/932c7932-8fbb-4943-b833-8481ed70de32-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.344624 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/932c7932-8fbb-4943-b833-8481ed70de32-config-data\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.363114 4784 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.363259 4784 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a2b79ab4-175f-4949-badc-67d63d98703e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e") on node "crc"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.446188 4784 reconciler_common.go:293] "Volume detached for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") on node \"crc\" DevicePath \"\""
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.920184 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.920172 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"932c7932-8fbb-4943-b833-8481ed70de32","Type":"ContainerDied","Data":"e2285a4c2dd3701cd4d4ad1ea42d32a43b9c09ed69d97a5cb39733682bef7d9c"}
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.920373 4784 scope.go:117] "RemoveContainer" containerID="7e3a9232258adb8f1d116df7993e5046263992356b26f0b7e56fbee5aec43eee"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.924043 4784 generic.go:334] "Generic (PLEG): container finished" podID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerID="33bfaa869e2881ea899df11d620eedbfb770f1b6d1ec10d66e2a2342eddacc19" exitCode=0
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.924118 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerDied","Data":"33bfaa869e2881ea899df11d620eedbfb770f1b6d1ec10d66e2a2342eddacc19"}
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.952333 4784 scope.go:117] "RemoveContainer" containerID="9faffe6656064c65960493c3f53f77da1d09e667fc586a873ef4951222b51973"
Jan 06 09:39:17 crc kubenswrapper[4784]: I0106 09:39:17.980520 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:17.989086 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.034287 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 06 09:39:18 crc kubenswrapper[4784]: E0106 09:39:18.044905 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="init"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045171 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="init"
Jan 06 09:39:18 crc kubenswrapper[4784]: E0106 09:39:18.045184 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="setup-container"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045191 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="setup-container"
Jan 06 09:39:18 crc kubenswrapper[4784]: E0106 09:39:18.045206 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="rabbitmq"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045212 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="rabbitmq"
Jan 06 09:39:18 crc kubenswrapper[4784]: E0106 09:39:18.045224 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="dnsmasq-dns"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045230 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="dnsmasq-dns"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045411 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" containerName="dnsmasq-dns"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.045424 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="932c7932-8fbb-4943-b833-8481ed70de32" containerName="rabbitmq"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.046266 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056278 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056353 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056594 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056697 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zdktv"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056756 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.056844 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.057030 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.062024 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.170747 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.170821 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-config-data\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.170850 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.170881 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.170935 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171047 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4kmw\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-kube-api-access-q4kmw\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171094 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171133 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171228 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 
09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171258 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.171284 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.271943 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.271985 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272013 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272053 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv869\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272085 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272103 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272138 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272155 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272178 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272194 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272274 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret\") pod \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\" (UID: \"59a80e93-fe77-4c67-a4b7-d1682f1bcf14\") " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272433 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272453 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272468 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272505 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272530 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-config-data\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272562 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272580 4784 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272605 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272650 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4kmw\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-kube-api-access-q4kmw\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.272669 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.273933 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.273376 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.274266 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.274817 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-config-data\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.273210 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.273459 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.276003 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.277086 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.277407 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.278240 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.278270 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5f99e721f0b9676a57ee65d594f297905f01f38edfb31ea4d191271f234583dd/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.278636 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.278661 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.280679 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). 
InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.281505 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.282188 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.283939 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info" (OuterVolumeSpecName: "pod-info") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.288785 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869" (OuterVolumeSpecName: "kube-api-access-lv869") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "kube-api-access-lv869". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.294045 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb" (OuterVolumeSpecName: "persistence") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.294244 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4kmw\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-kube-api-access-q4kmw\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.296066 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d73722d6-2310-44ed-a5a7-4dc1c0c7df16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.297651 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data" (OuterVolumeSpecName: "config-data") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.319829 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a2b79ab4-175f-4949-badc-67d63d98703e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a2b79ab4-175f-4949-badc-67d63d98703e\") pod \"rabbitmq-server-0\" (UID: \"d73722d6-2310-44ed-a5a7-4dc1c0c7df16\") " pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.326789 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf" (OuterVolumeSpecName: "server-conf") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.336958 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5" path="/var/lib/kubelet/pods/0cf6a65d-1e05-4b2c-83df-9fc4b8b66fa5/volumes" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.337850 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932c7932-8fbb-4943-b833-8481ed70de32" path="/var/lib/kubelet/pods/932c7932-8fbb-4943-b833-8481ed70de32/volumes" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.357891 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "59a80e93-fe77-4c67-a4b7-d1682f1bcf14" (UID: "59a80e93-fe77-4c67-a4b7-d1682f1bcf14"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375265 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv869\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-kube-api-access-lv869\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375288 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375298 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375307 4784 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-pod-info\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375314 4784 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375323 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375330 4784 reconciler_common.go:293] "Volume detached for 
volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375338 4784 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375374 4784 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") on node \"crc\" " Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375384 4784 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-server-conf\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.375394 4784 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/59a80e93-fe77-4c67-a4b7-d1682f1bcf14-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.388250 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.401154 4784 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.401325 4784 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb") on node "crc" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.477568 4784 reconciler_common.go:293] "Volume detached for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") on node \"crc\" DevicePath \"\"" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.943968 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"59a80e93-fe77-4c67-a4b7-d1682f1bcf14","Type":"ContainerDied","Data":"38c33824959eab29b47a4543f5ab4ebb6af22db923eb6eb1860cba52b4656319"} Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.944039 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:18 crc kubenswrapper[4784]: I0106 09:39:18.944510 4784 scope.go:117] "RemoveContainer" containerID="33bfaa869e2881ea899df11d620eedbfb770f1b6d1ec10d66e2a2342eddacc19" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.005197 4784 scope.go:117] "RemoveContainer" containerID="07b3e4b3a44be2862df885dade5439e104fed043c5dc46a390eaae7529928b3e" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.035147 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.042413 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.067983 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: E0106 09:39:19.068339 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="rabbitmq" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.068355 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="rabbitmq" Jan 06 09:39:19 crc kubenswrapper[4784]: E0106 09:39:19.068374 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="setup-container" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.068382 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="setup-container" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.068624 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" containerName="rabbitmq" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.070623 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.073523 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.073863 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-smbkr" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.078925 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.079224 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.079395 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.079482 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.079670 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.092688 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.195684 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3f448393-522c-4929-be8a-77f402e1d402-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.195808 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.195887 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3f448393-522c-4929-be8a-77f402e1d402-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.195929 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196138 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196434 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196589 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196654 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196695 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196757 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.196833 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvj7r\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-kube-api-access-tvj7r\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299103 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299195 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299254 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 
crc kubenswrapper[4784]: I0106 09:39:19.299284 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299310 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299379 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299403 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvj7r\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-kube-api-access-tvj7r\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299444 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3f448393-522c-4929-be8a-77f402e1d402-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299479 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299513 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3f448393-522c-4929-be8a-77f402e1d402-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299535 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.299998 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.300161 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.301446 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.303486 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.303796 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3f448393-522c-4929-be8a-77f402e1d402-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.306110 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.306166 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c174523447f4e00f60312996a8e4573c039f70780f76021176193e3f554aa553/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.306449 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.306939 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3f448393-522c-4929-be8a-77f402e1d402-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.307933 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.308442 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3f448393-522c-4929-be8a-77f402e1d402-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.323918 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvj7r\" (UniqueName: \"kubernetes.io/projected/3f448393-522c-4929-be8a-77f402e1d402-kube-api-access-tvj7r\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.371734 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a6223a1c-f78d-4caa-bf30-bb32b97e04cb\") pod \"rabbitmq-cell1-server-0\" (UID: \"3f448393-522c-4929-be8a-77f402e1d402\") " pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.398252 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.401266 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.680257 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 06 09:39:19 crc kubenswrapper[4784]: W0106 09:39:19.688033 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f448393_522c_4929_be8a_77f402e1d402.slice/crio-b801a100430407396ec46124d6feed9ca55223ae4016539af1152a7b1a4eee31 WatchSource:0}: Error finding container b801a100430407396ec46124d6feed9ca55223ae4016539af1152a7b1a4eee31: Status 404 returned error can't find the container with id b801a100430407396ec46124d6feed9ca55223ae4016539af1152a7b1a4eee31 Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.955833 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d73722d6-2310-44ed-a5a7-4dc1c0c7df16","Type":"ContainerStarted","Data":"38b74274021706ac58e4044182a152a2623c6c20a02387587d1be492faf47c75"} Jan 06 09:39:19 crc kubenswrapper[4784]: I0106 09:39:19.958008 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3f448393-522c-4929-be8a-77f402e1d402","Type":"ContainerStarted","Data":"b801a100430407396ec46124d6feed9ca55223ae4016539af1152a7b1a4eee31"} Jan 06 09:39:20 crc kubenswrapper[4784]: I0106 09:39:20.328360 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59a80e93-fe77-4c67-a4b7-d1682f1bcf14" path="/var/lib/kubelet/pods/59a80e93-fe77-4c67-a4b7-d1682f1bcf14/volumes" Jan 06 09:39:21 crc kubenswrapper[4784]: I0106 09:39:21.994537 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d73722d6-2310-44ed-a5a7-4dc1c0c7df16","Type":"ContainerStarted","Data":"a6fb59ea2544455e911bf62d7e0ce8a77e4ef46ddc955b60a1220519f4305d0d"} Jan 06 09:39:22 crc kubenswrapper[4784]: I0106 09:39:22.000048 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3f448393-522c-4929-be8a-77f402e1d402","Type":"ContainerStarted","Data":"68869cb19be1c5679b69f23fd6ed47f5c54e60949c2284798f1e3a91e30f3c64"} Jan 06 09:39:44 crc kubenswrapper[4784]: I0106 09:39:44.350931 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:39:44 crc kubenswrapper[4784]: I0106 09:39:44.352656 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:39:44 crc kubenswrapper[4784]: I0106 09:39:44.352732 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 09:39:44 crc kubenswrapper[4784]: I0106 09:39:44.353511 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 09:39:44 crc kubenswrapper[4784]: I0106 09:39:44.353651 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" gracePeriod=600 Jan 06 09:39:44 crc kubenswrapper[4784]: E0106 09:39:44.486072 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:39:45 crc kubenswrapper[4784]: I0106 09:39:45.269480 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" exitCode=0 Jan 06 09:39:45 crc kubenswrapper[4784]: I0106 09:39:45.269611 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"} Jan 06 09:39:45 crc kubenswrapper[4784]: I0106 09:39:45.269945 4784 scope.go:117] "RemoveContainer" containerID="b7b9e448a6f33bd942c79767c82389e1acef1e178a7310c3b4956a84d6bbce1a" Jan 06 09:39:45 crc kubenswrapper[4784]: I0106 09:39:45.270660 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:39:45 crc kubenswrapper[4784]: E0106 09:39:45.271063 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" 
Jan 06 09:39:55 crc kubenswrapper[4784]: I0106 09:39:55.375116 4784 generic.go:334] "Generic (PLEG): container finished" podID="d73722d6-2310-44ed-a5a7-4dc1c0c7df16" containerID="a6fb59ea2544455e911bf62d7e0ce8a77e4ef46ddc955b60a1220519f4305d0d" exitCode=0
Jan 06 09:39:55 crc kubenswrapper[4784]: I0106 09:39:55.375213 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d73722d6-2310-44ed-a5a7-4dc1c0c7df16","Type":"ContainerDied","Data":"a6fb59ea2544455e911bf62d7e0ce8a77e4ef46ddc955b60a1220519f4305d0d"}
Jan 06 09:39:55 crc kubenswrapper[4784]: I0106 09:39:55.381538 4784 generic.go:334] "Generic (PLEG): container finished" podID="3f448393-522c-4929-be8a-77f402e1d402" containerID="68869cb19be1c5679b69f23fd6ed47f5c54e60949c2284798f1e3a91e30f3c64" exitCode=0
Jan 06 09:39:55 crc kubenswrapper[4784]: I0106 09:39:55.381642 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3f448393-522c-4929-be8a-77f402e1d402","Type":"ContainerDied","Data":"68869cb19be1c5679b69f23fd6ed47f5c54e60949c2284798f1e3a91e30f3c64"}
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.393965 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d73722d6-2310-44ed-a5a7-4dc1c0c7df16","Type":"ContainerStarted","Data":"3222cf5f2fdcccf30f7f5f857e495a63811f3f5637e551a44e0d42a5743a72b5"}
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.394734 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.397284 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3f448393-522c-4929-be8a-77f402e1d402","Type":"ContainerStarted","Data":"abe9938a161210ff190f8fdfa0a8214ab5ed6e35d3a6a03ff03da8bad4578446"}
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.397931 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.431053 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.431027466 podStartE2EDuration="39.431027466s" podCreationTimestamp="2026-01-06 09:39:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:39:56.427663882 +0000 UTC m=+5098.473836779" watchObservedRunningTime="2026-01-06 09:39:56.431027466 +0000 UTC m=+5098.477200343"
Jan 06 09:39:56 crc kubenswrapper[4784]: I0106 09:39:56.472325 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.472303806 podStartE2EDuration="37.472303806s" podCreationTimestamp="2026-01-06 09:39:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:39:56.465083963 +0000 UTC m=+5098.511256850" watchObservedRunningTime="2026-01-06 09:39:56.472303806 +0000 UTC m=+5098.518476653"
Jan 06 09:39:57 crc kubenswrapper[4784]: I0106 09:39:57.312334 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"
Jan 06 09:39:57 crc kubenswrapper[4784]: E0106 09:39:57.312612 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:40:08 crc kubenswrapper[4784]: I0106 09:40:08.318943 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"
Jan 06 09:40:08 crc kubenswrapper[4784]: E0106 09:40:08.320205 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:40:08 crc kubenswrapper[4784]: I0106 09:40:08.391803 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 06 09:40:09 crc kubenswrapper[4784]: I0106 09:40:09.403894 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.163679 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"]
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.165820 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.173875 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.174187 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qcr5c"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.234986 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvq6w\" (UniqueName: \"kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w\") pod \"mariadb-client-1-default\" (UID: \"6e8fc21f-f2c5-4c32-9556-f4014a551d1b\") " pod="openstack/mariadb-client-1-default"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.337396 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvq6w\" (UniqueName: \"kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w\") pod \"mariadb-client-1-default\" (UID: \"6e8fc21f-f2c5-4c32-9556-f4014a551d1b\") " pod="openstack/mariadb-client-1-default"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.382867 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvq6w\" (UniqueName: \"kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w\") pod \"mariadb-client-1-default\" (UID: \"6e8fc21f-f2c5-4c32-9556-f4014a551d1b\") " pod="openstack/mariadb-client-1-default"
Jan 06 09:40:14 crc kubenswrapper[4784]: I0106 09:40:14.498976 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Jan 06 09:40:15 crc kubenswrapper[4784]: W0106 09:40:15.121383 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e8fc21f_f2c5_4c32_9556_f4014a551d1b.slice/crio-334031e3ebe286f4bb20a1f44e953e9d2e89f72789d18b6285976f1f727b22bf WatchSource:0}: Error finding container 334031e3ebe286f4bb20a1f44e953e9d2e89f72789d18b6285976f1f727b22bf: Status 404 returned error can't find the container with id 334031e3ebe286f4bb20a1f44e953e9d2e89f72789d18b6285976f1f727b22bf
Jan 06 09:40:15 crc kubenswrapper[4784]: I0106 09:40:15.122013 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Jan 06 09:40:15 crc kubenswrapper[4784]: I0106 09:40:15.574054 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"6e8fc21f-f2c5-4c32-9556-f4014a551d1b","Type":"ContainerStarted","Data":"334031e3ebe286f4bb20a1f44e953e9d2e89f72789d18b6285976f1f727b22bf"}
Jan 06 09:40:16 crc kubenswrapper[4784]: I0106 09:40:16.586042 4784 generic.go:334] "Generic (PLEG): container finished" podID="6e8fc21f-f2c5-4c32-9556-f4014a551d1b" containerID="b961726374c6289974265b07526c01128563ed41d7b3bb12a511c39de74a63a1" exitCode=0
Jan 06 09:40:16 crc kubenswrapper[4784]: I0106 09:40:16.586143 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"6e8fc21f-f2c5-4c32-9556-f4014a551d1b","Type":"ContainerDied","Data":"b961726374c6289974265b07526c01128563ed41d7b3bb12a511c39de74a63a1"}
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.160278 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.194050 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_6e8fc21f-f2c5-4c32-9556-f4014a551d1b/mariadb-client-1-default/0.log"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.200303 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvq6w\" (UniqueName: \"kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w\") pod \"6e8fc21f-f2c5-4c32-9556-f4014a551d1b\" (UID: \"6e8fc21f-f2c5-4c32-9556-f4014a551d1b\") "
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.208516 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w" (OuterVolumeSpecName: "kube-api-access-mvq6w") pod "6e8fc21f-f2c5-4c32-9556-f4014a551d1b" (UID: "6e8fc21f-f2c5-4c32-9556-f4014a551d1b"). InnerVolumeSpecName "kube-api-access-mvq6w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.234729 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"]
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.243065 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"]
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.301999 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvq6w\" (UniqueName: \"kubernetes.io/projected/6e8fc21f-f2c5-4c32-9556-f4014a551d1b-kube-api-access-mvq6w\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.328885 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e8fc21f-f2c5-4c32-9556-f4014a551d1b" path="/var/lib/kubelet/pods/6e8fc21f-f2c5-4c32-9556-f4014a551d1b/volumes"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.604030 4784 scope.go:117] "RemoveContainer" containerID="b961726374c6289974265b07526c01128563ed41d7b3bb12a511c39de74a63a1"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.604058 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.657717 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"]
Jan 06 09:40:18 crc kubenswrapper[4784]: E0106 09:40:18.658071 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8fc21f-f2c5-4c32-9556-f4014a551d1b" containerName="mariadb-client-1-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.658085 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8fc21f-f2c5-4c32-9556-f4014a551d1b" containerName="mariadb-client-1-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.658248 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e8fc21f-f2c5-4c32-9556-f4014a551d1b" containerName="mariadb-client-1-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.658759 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.661888 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qcr5c"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.682521 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.708830 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjqdv\" (UniqueName: \"kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv\") pod \"mariadb-client-2-default\" (UID: \"f8fc0549-1588-453f-bba6-dfe7fadecd18\") " pod="openstack/mariadb-client-2-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.810276 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjqdv\" (UniqueName: \"kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv\") pod \"mariadb-client-2-default\" (UID: \"f8fc0549-1588-453f-bba6-dfe7fadecd18\") " pod="openstack/mariadb-client-2-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.837664 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjqdv\" (UniqueName: \"kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv\") pod \"mariadb-client-2-default\" (UID: \"f8fc0549-1588-453f-bba6-dfe7fadecd18\") " pod="openstack/mariadb-client-2-default"
Jan 06 09:40:18 crc kubenswrapper[4784]: I0106 09:40:18.989842 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Jan 06 09:40:19 crc kubenswrapper[4784]: I0106 09:40:19.595120 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Jan 06 09:40:19 crc kubenswrapper[4784]: W0106 09:40:19.603440 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8fc0549_1588_453f_bba6_dfe7fadecd18.slice/crio-025054ecce17b90b7b01ceae917665e4334eb5a8be5e1cb48b16dd42c5143243 WatchSource:0}: Error finding container 025054ecce17b90b7b01ceae917665e4334eb5a8be5e1cb48b16dd42c5143243: Status 404 returned error can't find the container with id 025054ecce17b90b7b01ceae917665e4334eb5a8be5e1cb48b16dd42c5143243
Jan 06 09:40:19 crc kubenswrapper[4784]: I0106 09:40:19.620069 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"f8fc0549-1588-453f-bba6-dfe7fadecd18","Type":"ContainerStarted","Data":"025054ecce17b90b7b01ceae917665e4334eb5a8be5e1cb48b16dd42c5143243"}
Jan 06 09:40:20 crc kubenswrapper[4784]: I0106 09:40:20.630730 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"f8fc0549-1588-453f-bba6-dfe7fadecd18","Type":"ContainerStarted","Data":"90b4edc0a4d6664a79e304ca546da9883364ffe64ec55ab20db10ce9bdcf223d"}
Jan 06 09:40:20 crc kubenswrapper[4784]: I0106 09:40:20.666355 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-2-default" podStartSLOduration=2.666325977 podStartE2EDuration="2.666325977s" podCreationTimestamp="2026-01-06 09:40:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:40:20.648460722 +0000 UTC m=+5122.694633579" watchObservedRunningTime="2026-01-06 09:40:20.666325977 +0000 UTC m=+5122.712498864"
Jan 06 09:40:20 crc kubenswrapper[4784]: I0106 09:40:20.722141 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_f8fc0549-1588-453f-bba6-dfe7fadecd18/mariadb-client-2-default/0.log"
Jan 06 09:40:21 crc kubenswrapper[4784]: I0106 09:40:21.640199 4784 generic.go:334] "Generic (PLEG): container finished" podID="f8fc0549-1588-453f-bba6-dfe7fadecd18" containerID="90b4edc0a4d6664a79e304ca546da9883364ffe64ec55ab20db10ce9bdcf223d" exitCode=1
Jan 06 09:40:21 crc kubenswrapper[4784]: I0106 09:40:21.640261 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"f8fc0549-1588-453f-bba6-dfe7fadecd18","Type":"ContainerDied","Data":"90b4edc0a4d6664a79e304ca546da9883364ffe64ec55ab20db10ce9bdcf223d"}
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.040408 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.075479 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"]
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.080341 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"]
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.081365 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjqdv\" (UniqueName: \"kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv\") pod \"f8fc0549-1588-453f-bba6-dfe7fadecd18\" (UID: \"f8fc0549-1588-453f-bba6-dfe7fadecd18\") "
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.090222 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv" (OuterVolumeSpecName: "kube-api-access-jjqdv") pod "f8fc0549-1588-453f-bba6-dfe7fadecd18" (UID: "f8fc0549-1588-453f-bba6-dfe7fadecd18"). InnerVolumeSpecName "kube-api-access-jjqdv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.185245 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjqdv\" (UniqueName: \"kubernetes.io/projected/f8fc0549-1588-453f-bba6-dfe7fadecd18-kube-api-access-jjqdv\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.312719 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"
Jan 06 09:40:23 crc kubenswrapper[4784]: E0106 09:40:23.313314 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.518500 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"]
Jan 06 09:40:23 crc kubenswrapper[4784]: E0106 09:40:23.518784 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fc0549-1588-453f-bba6-dfe7fadecd18" containerName="mariadb-client-2-default"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.518796 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fc0549-1588-453f-bba6-dfe7fadecd18" containerName="mariadb-client-2-default"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.518972 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fc0549-1588-453f-bba6-dfe7fadecd18" containerName="mariadb-client-2-default"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.519408 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.526975 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.593687 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkl95\" (UniqueName: \"kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95\") pod \"mariadb-client-1\" (UID: \"99e65389-d3f0-4128-b2c6-172ef0bc1867\") " pod="openstack/mariadb-client-1"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.658583 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="025054ecce17b90b7b01ceae917665e4334eb5a8be5e1cb48b16dd42c5143243"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.658708 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.695964 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkl95\" (UniqueName: \"kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95\") pod \"mariadb-client-1\" (UID: \"99e65389-d3f0-4128-b2c6-172ef0bc1867\") " pod="openstack/mariadb-client-1"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.721644 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkl95\" (UniqueName: \"kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95\") pod \"mariadb-client-1\" (UID: \"99e65389-d3f0-4128-b2c6-172ef0bc1867\") " pod="openstack/mariadb-client-1"
Jan 06 09:40:23 crc kubenswrapper[4784]: I0106 09:40:23.835115 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Jan 06 09:40:24 crc kubenswrapper[4784]: I0106 09:40:24.320800 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8fc0549-1588-453f-bba6-dfe7fadecd18" path="/var/lib/kubelet/pods/f8fc0549-1588-453f-bba6-dfe7fadecd18/volumes"
Jan 06 09:40:24 crc kubenswrapper[4784]: I0106 09:40:24.327326 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Jan 06 09:40:24 crc kubenswrapper[4784]: I0106 09:40:24.667618 4784 generic.go:334] "Generic (PLEG): container finished" podID="99e65389-d3f0-4128-b2c6-172ef0bc1867" containerID="5ce3144bcaf6761ce1b29a388992a580b4be22254642d01e934c2000ee757e44" exitCode=0
Jan 06 09:40:24 crc kubenswrapper[4784]: I0106 09:40:24.667775 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"99e65389-d3f0-4128-b2c6-172ef0bc1867","Type":"ContainerDied","Data":"5ce3144bcaf6761ce1b29a388992a580b4be22254642d01e934c2000ee757e44"}
Jan 06 09:40:24 crc kubenswrapper[4784]: I0106 09:40:24.668676 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"99e65389-d3f0-4128-b2c6-172ef0bc1867","Type":"ContainerStarted","Data":"696e9ca20ab1e239e4420a9bba6f98c860ffefa61c6c26b0aae09e7269ac4181"}
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.150032 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.171276 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_99e65389-d3f0-4128-b2c6-172ef0bc1867/mariadb-client-1/0.log"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.199008 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"]
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.204593 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"]
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.235400 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkl95\" (UniqueName: \"kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95\") pod \"99e65389-d3f0-4128-b2c6-172ef0bc1867\" (UID: \"99e65389-d3f0-4128-b2c6-172ef0bc1867\") "
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.240710 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95" (OuterVolumeSpecName: "kube-api-access-qkl95") pod "99e65389-d3f0-4128-b2c6-172ef0bc1867" (UID: "99e65389-d3f0-4128-b2c6-172ef0bc1867"). InnerVolumeSpecName "kube-api-access-qkl95". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.320210 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99e65389-d3f0-4128-b2c6-172ef0bc1867" path="/var/lib/kubelet/pods/99e65389-d3f0-4128-b2c6-172ef0bc1867/volumes"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.338079 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkl95\" (UniqueName: \"kubernetes.io/projected/99e65389-d3f0-4128-b2c6-172ef0bc1867-kube-api-access-qkl95\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.687627 4784 scope.go:117] "RemoveContainer" containerID="5ce3144bcaf6761ce1b29a388992a580b4be22254642d01e934c2000ee757e44"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.687688 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.704184 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"]
Jan 06 09:40:26 crc kubenswrapper[4784]: E0106 09:40:26.706186 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99e65389-d3f0-4128-b2c6-172ef0bc1867" containerName="mariadb-client-1"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.706225 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="99e65389-d3f0-4128-b2c6-172ef0bc1867" containerName="mariadb-client-1"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.707023 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="99e65389-d3f0-4128-b2c6-172ef0bc1867" containerName="mariadb-client-1"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.707973 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.710736 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qcr5c"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.720842 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"]
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.744819 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lvb4\" (UniqueName: \"kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4\") pod \"mariadb-client-4-default\" (UID: \"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76\") " pod="openstack/mariadb-client-4-default"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.846197 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lvb4\" (UniqueName: \"kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4\") pod \"mariadb-client-4-default\" (UID: \"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76\") " pod="openstack/mariadb-client-4-default"
Jan 06 09:40:26 crc kubenswrapper[4784]: I0106 09:40:26.866433 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lvb4\" (UniqueName: \"kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4\") pod \"mariadb-client-4-default\" (UID: \"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76\") " pod="openstack/mariadb-client-4-default"
Jan 06 09:40:27 crc kubenswrapper[4784]: I0106 09:40:27.065754 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Jan 06 09:40:27 crc kubenswrapper[4784]: I0106 09:40:27.656792 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"]
Jan 06 09:40:27 crc kubenswrapper[4784]: I0106 09:40:27.711557 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76","Type":"ContainerStarted","Data":"083c0347e8d030eb004667dae15fd11dd30175a5dafff19d871cc232092c5fe6"}
Jan 06 09:40:28 crc kubenswrapper[4784]: I0106 09:40:28.724185 4784 generic.go:334] "Generic (PLEG): container finished" podID="0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" containerID="c1f53672bcd3fb880429d6d06e1cbe171a412c7697bcc7d97a3e52f6d6719216" exitCode=0
Jan 06 09:40:28 crc kubenswrapper[4784]: I0106 09:40:28.724257 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76","Type":"ContainerDied","Data":"c1f53672bcd3fb880429d6d06e1cbe171a412c7697bcc7d97a3e52f6d6719216"}
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.269689 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.297555 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_0f58b6c3-2dd9-4d55-b5ef-797af96f1c76/mariadb-client-4-default/0.log"
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.329839 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"]
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.329896 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"]
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.344148 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lvb4\" (UniqueName: \"kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4\") pod \"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76\" (UID: \"0f58b6c3-2dd9-4d55-b5ef-797af96f1c76\") "
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.357717 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4" (OuterVolumeSpecName: "kube-api-access-6lvb4") pod "0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" (UID: "0f58b6c3-2dd9-4d55-b5ef-797af96f1c76"). InnerVolumeSpecName "kube-api-access-6lvb4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.445894 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lvb4\" (UniqueName: \"kubernetes.io/projected/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76-kube-api-access-6lvb4\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.744802 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="083c0347e8d030eb004667dae15fd11dd30175a5dafff19d871cc232092c5fe6"
Jan 06 09:40:30 crc kubenswrapper[4784]: I0106 09:40:30.744929 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Jan 06 09:40:32 crc kubenswrapper[4784]: I0106 09:40:32.329866 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" path="/var/lib/kubelet/pods/0f58b6c3-2dd9-4d55-b5ef-797af96f1c76/volumes"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.268940 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"]
Jan 06 09:40:34 crc kubenswrapper[4784]: E0106 09:40:34.270172 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" containerName="mariadb-client-4-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.270197 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" containerName="mariadb-client-4-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.270846 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f58b6c3-2dd9-4d55-b5ef-797af96f1c76" containerName="mariadb-client-4-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.272008 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.278041 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qcr5c"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.288236 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"]
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.313441 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k4rl\" (UniqueName: \"kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl\") pod \"mariadb-client-5-default\" (UID: \"e7941f03-0fad-4bdc-b664-be56273d0f07\") " pod="openstack/mariadb-client-5-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.414597 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k4rl\" (UniqueName: \"kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl\") pod \"mariadb-client-5-default\" (UID: \"e7941f03-0fad-4bdc-b664-be56273d0f07\") " pod="openstack/mariadb-client-5-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.443103 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k4rl\" (UniqueName: \"kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl\") pod \"mariadb-client-5-default\" (UID: \"e7941f03-0fad-4bdc-b664-be56273d0f07\") " pod="openstack/mariadb-client-5-default"
Jan 06 09:40:34 crc kubenswrapper[4784]: I0106 09:40:34.607973 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Jan 06 09:40:35 crc kubenswrapper[4784]: I0106 09:40:35.156188 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"]
Jan 06 09:40:35 crc kubenswrapper[4784]: W0106 09:40:35.166669 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7941f03_0fad_4bdc_b664_be56273d0f07.slice/crio-25d321ba60e2ae55e54a74644d5d6ec84d1b480194607345a77791227076d3d6 WatchSource:0}: Error finding container 25d321ba60e2ae55e54a74644d5d6ec84d1b480194607345a77791227076d3d6: Status 404 returned error can't find the container with id 25d321ba60e2ae55e54a74644d5d6ec84d1b480194607345a77791227076d3d6
Jan 06 09:40:35 crc kubenswrapper[4784]: I0106 09:40:35.788253 4784 generic.go:334] "Generic (PLEG): container finished" podID="e7941f03-0fad-4bdc-b664-be56273d0f07" containerID="f454c0cfeb169cb798895409809ae68f19010eacce8e1255d40ba6b2a27811e3" exitCode=0
Jan 06 09:40:35 crc kubenswrapper[4784]: I0106 09:40:35.788375 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"e7941f03-0fad-4bdc-b664-be56273d0f07","Type":"ContainerDied","Data":"f454c0cfeb169cb798895409809ae68f19010eacce8e1255d40ba6b2a27811e3"}
Jan 06 09:40:35 crc kubenswrapper[4784]: I0106 09:40:35.788667 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"e7941f03-0fad-4bdc-b664-be56273d0f07","Type":"ContainerStarted","Data":"25d321ba60e2ae55e54a74644d5d6ec84d1b480194607345a77791227076d3d6"}
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.360806 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.384277 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_e7941f03-0fad-4bdc-b664-be56273d0f07/mariadb-client-5-default/0.log"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.423965 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"]
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.438683 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"]
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.463824 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k4rl\" (UniqueName: \"kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl\") pod \"e7941f03-0fad-4bdc-b664-be56273d0f07\" (UID: \"e7941f03-0fad-4bdc-b664-be56273d0f07\") "
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.471046 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl" (OuterVolumeSpecName: "kube-api-access-7k4rl") pod "e7941f03-0fad-4bdc-b664-be56273d0f07" (UID: "e7941f03-0fad-4bdc-b664-be56273d0f07"). InnerVolumeSpecName "kube-api-access-7k4rl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.566604 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k4rl\" (UniqueName: \"kubernetes.io/projected/e7941f03-0fad-4bdc-b664-be56273d0f07-kube-api-access-7k4rl\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.586209 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"]
Jan 06 09:40:37 crc kubenswrapper[4784]: E0106 09:40:37.586951 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7941f03-0fad-4bdc-b664-be56273d0f07" containerName="mariadb-client-5-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.587100 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7941f03-0fad-4bdc-b664-be56273d0f07" containerName="mariadb-client-5-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.587476 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7941f03-0fad-4bdc-b664-be56273d0f07" containerName="mariadb-client-5-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.588724 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.594315 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"]
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.770511 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqnfm\" (UniqueName: \"kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm\") pod \"mariadb-client-6-default\" (UID: \"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69\") " pod="openstack/mariadb-client-6-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.808534 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25d321ba60e2ae55e54a74644d5d6ec84d1b480194607345a77791227076d3d6"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.808667 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.872742 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqnfm\" (UniqueName: \"kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm\") pod \"mariadb-client-6-default\" (UID: \"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69\") " pod="openstack/mariadb-client-6-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.892418 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqnfm\" (UniqueName: \"kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm\") pod \"mariadb-client-6-default\" (UID: \"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69\") " pod="openstack/mariadb-client-6-default"
Jan 06 09:40:37 crc kubenswrapper[4784]: I0106 09:40:37.905576 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.322888 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"
Jan 06 09:40:38 crc kubenswrapper[4784]: E0106 09:40:38.323529 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7"
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.347064 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7941f03-0fad-4bdc-b664-be56273d0f07" path="/var/lib/kubelet/pods/e7941f03-0fad-4bdc-b664-be56273d0f07/volumes"
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.485219 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"]
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.818982 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69","Type":"ContainerStarted","Data":"390a7ad5be1d855c43fdab3b3d7a26257213f7d5ec53ca258400c02ec03e2fad"}
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.819061 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69","Type":"ContainerStarted","Data":"3a109a6c5defb75beb357547897de3fb2354c40f2d0b3a7c23eadba2131e245c"}
Jan 06 09:40:38 crc kubenswrapper[4784]: I0106 09:40:38.838244 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=1.8382267209999998 podStartE2EDuration="1.838226721s" podCreationTimestamp="2026-01-06 09:40:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:40:38.834369922 +0000 UTC m=+5140.880542799" watchObservedRunningTime="2026-01-06 09:40:38.838226721 +0000 UTC m=+5140.884399558"
Jan 06 09:40:39 crc kubenswrapper[4784]: I0106 09:40:39.833845 4784 generic.go:334] "Generic (PLEG): container finished" podID="2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" containerID="390a7ad5be1d855c43fdab3b3d7a26257213f7d5ec53ca258400c02ec03e2fad" exitCode=1
Jan 06 09:40:39 crc kubenswrapper[4784]: I0106 09:40:39.835672 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69","Type":"ContainerDied","Data":"390a7ad5be1d855c43fdab3b3d7a26257213f7d5ec53ca258400c02ec03e2fad"}
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.362259 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.412801 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"]
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.422365 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"]
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.434099 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqnfm\" (UniqueName: \"kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm\") pod \"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69\" (UID: \"2d8399a1-af8d-4db1-b9d7-5dea8b77ce69\") "
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.442782 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm" (OuterVolumeSpecName: "kube-api-access-qqnfm") pod "2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" (UID: "2d8399a1-af8d-4db1-b9d7-5dea8b77ce69"). InnerVolumeSpecName "kube-api-access-qqnfm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.536836 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqnfm\" (UniqueName: \"kubernetes.io/projected/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69-kube-api-access-qqnfm\") on node \"crc\" DevicePath \"\""
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.607728 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"]
Jan 06 09:40:41 crc kubenswrapper[4784]: E0106 09:40:41.608296 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" containerName="mariadb-client-6-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.608326 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" containerName="mariadb-client-6-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.608648 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" containerName="mariadb-client-6-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.609506 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.618061 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"]
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.639256 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq46l\" (UniqueName: \"kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l\") pod \"mariadb-client-7-default\" (UID: \"461c75e9-6ab8-47a9-9a70-fec97dda33db\") " pod="openstack/mariadb-client-7-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.741023 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq46l\" (UniqueName: \"kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l\") pod \"mariadb-client-7-default\" (UID: \"461c75e9-6ab8-47a9-9a70-fec97dda33db\") " pod="openstack/mariadb-client-7-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.776527 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq46l\" (UniqueName: \"kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l\") pod \"mariadb-client-7-default\" (UID: \"461c75e9-6ab8-47a9-9a70-fec97dda33db\") " pod="openstack/mariadb-client-7-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.858708 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a109a6c5defb75beb357547897de3fb2354c40f2d0b3a7c23eadba2131e245c"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.858802 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Jan 06 09:40:41 crc kubenswrapper[4784]: I0106 09:40:41.927058 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Jan 06 09:40:42 crc kubenswrapper[4784]: I0106 09:40:42.328280 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d8399a1-af8d-4db1-b9d7-5dea8b77ce69" path="/var/lib/kubelet/pods/2d8399a1-af8d-4db1-b9d7-5dea8b77ce69/volumes"
Jan 06 09:40:42 crc kubenswrapper[4784]: I0106 09:40:42.458245 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"]
Jan 06 09:40:42 crc kubenswrapper[4784]: I0106 09:40:42.884148 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"461c75e9-6ab8-47a9-9a70-fec97dda33db","Type":"ContainerDied","Data":"cabfa6043195df7fe4e3a974ab3311b5a520f1fc4a8e33a58a4a038185197c1d"}
Jan 06 09:40:42 crc kubenswrapper[4784]: I0106 09:40:42.884054 4784 generic.go:334] "Generic (PLEG): container finished" podID="461c75e9-6ab8-47a9-9a70-fec97dda33db" containerID="cabfa6043195df7fe4e3a974ab3311b5a520f1fc4a8e33a58a4a038185197c1d" exitCode=0
Jan 06 09:40:42 crc kubenswrapper[4784]: I0106 09:40:42.884975 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"461c75e9-6ab8-47a9-9a70-fec97dda33db","Type":"ContainerStarted","Data":"fb99b3f792f3be0a8fa891ff861389b4b173a5ac15cee32e9a1cb90ef543d3dd"}
Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.354203 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.372878 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_461c75e9-6ab8-47a9-9a70-fec97dda33db/mariadb-client-7-default/0.log" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.392567 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq46l\" (UniqueName: \"kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l\") pod \"461c75e9-6ab8-47a9-9a70-fec97dda33db\" (UID: \"461c75e9-6ab8-47a9-9a70-fec97dda33db\") " Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.409447 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.409591 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l" (OuterVolumeSpecName: "kube-api-access-cq46l") pod "461c75e9-6ab8-47a9-9a70-fec97dda33db" (UID: "461c75e9-6ab8-47a9-9a70-fec97dda33db"). InnerVolumeSpecName "kube-api-access-cq46l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.417362 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.494141 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq46l\" (UniqueName: \"kubernetes.io/projected/461c75e9-6ab8-47a9-9a70-fec97dda33db-kube-api-access-cq46l\") on node \"crc\" DevicePath \"\"" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.861140 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Jan 06 09:40:44 crc kubenswrapper[4784]: E0106 09:40:44.861510 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="461c75e9-6ab8-47a9-9a70-fec97dda33db" containerName="mariadb-client-7-default" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.861531 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="461c75e9-6ab8-47a9-9a70-fec97dda33db" containerName="mariadb-client-7-default" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.861713 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="461c75e9-6ab8-47a9-9a70-fec97dda33db" containerName="mariadb-client-7-default" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.862253 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.880341 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.899836 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w84vg\" (UniqueName: \"kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg\") pod \"mariadb-client-2\" (UID: \"ab71b269-e56e-4974-b0c7-8a88db6f3c97\") " pod="openstack/mariadb-client-2" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.907755 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb99b3f792f3be0a8fa891ff861389b4b173a5ac15cee32e9a1cb90ef543d3dd" Jan 06 09:40:44 crc kubenswrapper[4784]: I0106 09:40:44.907834 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 06 09:40:45 crc kubenswrapper[4784]: I0106 09:40:45.001437 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w84vg\" (UniqueName: \"kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg\") pod \"mariadb-client-2\" (UID: \"ab71b269-e56e-4974-b0c7-8a88db6f3c97\") " pod="openstack/mariadb-client-2" Jan 06 09:40:45 crc kubenswrapper[4784]: I0106 09:40:45.032698 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w84vg\" (UniqueName: \"kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg\") pod \"mariadb-client-2\" (UID: \"ab71b269-e56e-4974-b0c7-8a88db6f3c97\") " pod="openstack/mariadb-client-2" Jan 06 09:40:45 crc kubenswrapper[4784]: I0106 09:40:45.196098 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Jan 06 09:40:45 crc kubenswrapper[4784]: I0106 09:40:45.779902 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Jan 06 09:40:45 crc kubenswrapper[4784]: W0106 09:40:45.788650 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab71b269_e56e_4974_b0c7_8a88db6f3c97.slice/crio-7a1076e33df64fb9521a935375bde174c4054d621933ba64d7e9225bbada9b60 WatchSource:0}: Error finding container 7a1076e33df64fb9521a935375bde174c4054d621933ba64d7e9225bbada9b60: Status 404 returned error can't find the container with id 7a1076e33df64fb9521a935375bde174c4054d621933ba64d7e9225bbada9b60 Jan 06 09:40:45 crc kubenswrapper[4784]: I0106 09:40:45.919172 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"ab71b269-e56e-4974-b0c7-8a88db6f3c97","Type":"ContainerStarted","Data":"7a1076e33df64fb9521a935375bde174c4054d621933ba64d7e9225bbada9b60"} Jan 06 09:40:46 crc kubenswrapper[4784]: I0106 09:40:46.330698 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="461c75e9-6ab8-47a9-9a70-fec97dda33db" path="/var/lib/kubelet/pods/461c75e9-6ab8-47a9-9a70-fec97dda33db/volumes" Jan 06 09:40:46 crc kubenswrapper[4784]: I0106 09:40:46.933432 4784 generic.go:334] "Generic (PLEG): container finished" podID="ab71b269-e56e-4974-b0c7-8a88db6f3c97" containerID="0eabe4747a19425ea94ce8e680b71203c70c9756925aa63e565766cbf3a6eedc" exitCode=0 Jan 06 09:40:46 crc kubenswrapper[4784]: I0106 09:40:46.933498 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"ab71b269-e56e-4974-b0c7-8a88db6f3c97","Type":"ContainerDied","Data":"0eabe4747a19425ea94ce8e680b71203c70c9756925aa63e565766cbf3a6eedc"} Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.451951 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.474300 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_ab71b269-e56e-4974-b0c7-8a88db6f3c97/mariadb-client-2/0.log" Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.504028 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.509698 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.560849 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w84vg\" (UniqueName: \"kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg\") pod \"ab71b269-e56e-4974-b0c7-8a88db6f3c97\" (UID: \"ab71b269-e56e-4974-b0c7-8a88db6f3c97\") " Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.568667 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg" (OuterVolumeSpecName: "kube-api-access-w84vg") pod "ab71b269-e56e-4974-b0c7-8a88db6f3c97" (UID: "ab71b269-e56e-4974-b0c7-8a88db6f3c97"). InnerVolumeSpecName "kube-api-access-w84vg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.663056 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w84vg\" (UniqueName: \"kubernetes.io/projected/ab71b269-e56e-4974-b0c7-8a88db6f3c97-kube-api-access-w84vg\") on node \"crc\" DevicePath \"\"" Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.956655 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a1076e33df64fb9521a935375bde174c4054d621933ba64d7e9225bbada9b60" Jan 06 09:40:48 crc kubenswrapper[4784]: I0106 09:40:48.956774 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Jan 06 09:40:50 crc kubenswrapper[4784]: I0106 09:40:50.313289 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:40:50 crc kubenswrapper[4784]: E0106 09:40:50.314288 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:40:50 crc kubenswrapper[4784]: I0106 09:40:50.341265 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab71b269-e56e-4974-b0c7-8a88db6f3c97" path="/var/lib/kubelet/pods/ab71b269-e56e-4974-b0c7-8a88db6f3c97/volumes" Jan 06 09:41:05 crc kubenswrapper[4784]: I0106 09:41:05.312364 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:41:05 crc kubenswrapper[4784]: E0106 09:41:05.313604 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:41:19 crc kubenswrapper[4784]: I0106 09:41:19.313332 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:41:19 crc kubenswrapper[4784]: E0106 09:41:19.314831 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:41:30 crc kubenswrapper[4784]: I0106 09:41:30.312603 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:41:30 crc kubenswrapper[4784]: E0106 09:41:30.313517 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:41:41 crc kubenswrapper[4784]: I0106 09:41:41.313895 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:41:41 crc kubenswrapper[4784]: E0106 09:41:41.315233 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:41:54 crc kubenswrapper[4784]: I0106 09:41:54.312481 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:41:54 crc kubenswrapper[4784]: E0106 09:41:54.313651 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:42:09 crc kubenswrapper[4784]: I0106 09:42:09.313078 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:42:09 crc kubenswrapper[4784]: E0106 09:42:09.314202 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:42:17 crc kubenswrapper[4784]: I0106 09:42:17.827396 4784 scope.go:117] "RemoveContainer" containerID="82138bd039035802a484241893b9aeb3b33305b489e5fbbc6d063fe396bb98e1" Jan 06 09:42:22 crc kubenswrapper[4784]: I0106 09:42:22.312656 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:42:22 crc kubenswrapper[4784]: E0106 09:42:22.313423 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:42:34 crc kubenswrapper[4784]: I0106 09:42:34.312768 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:42:34 crc kubenswrapper[4784]: E0106 09:42:34.314013 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:42:45 crc kubenswrapper[4784]: I0106 09:42:45.312586 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:42:45 crc kubenswrapper[4784]: E0106 09:42:45.315104 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:43:00 crc kubenswrapper[4784]: I0106 09:43:00.312837 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:43:00 crc kubenswrapper[4784]: E0106 09:43:00.313823 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:43:11 crc kubenswrapper[4784]: I0106 09:43:11.312847 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:43:11 crc kubenswrapper[4784]: E0106 09:43:11.314063 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:43:25 crc kubenswrapper[4784]: I0106 09:43:25.311770 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:43:25 crc kubenswrapper[4784]: E0106 09:43:25.312449 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:43:39 crc kubenswrapper[4784]: I0106 09:43:39.312595 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:43:39 crc kubenswrapper[4784]: E0106 09:43:39.313811 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:43:50 crc kubenswrapper[4784]: I0106 09:43:50.313631 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:43:50 crc kubenswrapper[4784]: E0106 09:43:50.314748 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:44:03 crc kubenswrapper[4784]: I0106 09:44:03.312865 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:44:03 crc kubenswrapper[4784]: E0106 09:44:03.314051 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:44:14 crc kubenswrapper[4784]: I0106 09:44:14.314224 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:44:14 crc kubenswrapper[4784]: E0106 09:44:14.315302 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:44:17 crc kubenswrapper[4784]: I0106 09:44:17.936783 4784 scope.go:117] "RemoveContainer" containerID="4eaa5aa22a13fdfcffa2198be8918fc6c736f1b7f0e3336ad90d393a283c5b14" Jan 06 09:44:17 crc kubenswrapper[4784]: I0106 09:44:17.973182 4784 scope.go:117] "RemoveContainer" containerID="8c7c4b3cc698340d3b464ef0546fd7a519a244b403baad7d77afd9ce7b0cc788" Jan 06 09:44:29 crc kubenswrapper[4784]: I0106 09:44:29.312358 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:44:29 crc kubenswrapper[4784]: E0106 09:44:29.313515 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.848029 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Jan 06 09:44:30 crc kubenswrapper[4784]: E0106 09:44:30.848943 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab71b269-e56e-4974-b0c7-8a88db6f3c97" containerName="mariadb-client-2" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.848968 4784 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="ab71b269-e56e-4974-b0c7-8a88db6f3c97" containerName="mariadb-client-2" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.849232 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab71b269-e56e-4974-b0c7-8a88db6f3c97" containerName="mariadb-client-2" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.850010 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.853154 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qcr5c" Jan 06 09:44:30 crc kubenswrapper[4784]: I0106 09:44:30.873676 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.015805 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.015881 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw7jn\" (UniqueName: \"kubernetes.io/projected/1305a97b-56fd-4686-bfe9-ee6901343241-kube-api-access-sw7jn\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.117967 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.118054 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw7jn\" (UniqueName: \"kubernetes.io/projected/1305a97b-56fd-4686-bfe9-ee6901343241-kube-api-access-sw7jn\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.123356 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.123410 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/260e29c7bebf852e126e5b92ed90066c368d629c545221131a1eecf7095982b6/globalmount\"" pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.161118 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw7jn\" (UniqueName: \"kubernetes.io/projected/1305a97b-56fd-4686-bfe9-ee6901343241-kube-api-access-sw7jn\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.168032 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-acdb4ccd-8a5b-4721-8b03-62093b08ebf6\") pod \"mariadb-copy-data\" (UID: \"1305a97b-56fd-4686-bfe9-ee6901343241\") " pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.184469 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.545771 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.930785 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"1305a97b-56fd-4686-bfe9-ee6901343241","Type":"ContainerStarted","Data":"a3207095d937c208b4b830582914aa7f3bc5f2d301af36b7e22c3cece6cbba11"} Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.930830 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"1305a97b-56fd-4686-bfe9-ee6901343241","Type":"ContainerStarted","Data":"cbd48fb10865edd369ba15f80e5daa5dd6dbf08b2fa2102ae56db99a0a41a7d4"} Jan 06 09:44:31 crc kubenswrapper[4784]: I0106 09:44:31.953154 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=2.9531152819999997 podStartE2EDuration="2.953115282s" podCreationTimestamp="2026-01-06 09:44:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:44:31.948974804 +0000 UTC m=+5373.995147681" watchObservedRunningTime="2026-01-06 09:44:31.953115282 +0000 UTC m=+5373.999288159" Jan 06 09:44:34 crc kubenswrapper[4784]: I0106 09:44:34.974956 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:34 crc kubenswrapper[4784]: I0106 09:44:34.977277 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:34 crc kubenswrapper[4784]: I0106 09:44:34.988607 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.095632 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjjng\" (UniqueName: \"kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng\") pod \"mariadb-client\" (UID: \"13971e65-859a-4a42-b18f-7f5bf642451c\") " pod="openstack/mariadb-client" Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.197490 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjjng\" (UniqueName: \"kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng\") pod \"mariadb-client\" (UID: \"13971e65-859a-4a42-b18f-7f5bf642451c\") " pod="openstack/mariadb-client" Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.225201 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjjng\" (UniqueName: \"kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng\") pod \"mariadb-client\" (UID: \"13971e65-859a-4a42-b18f-7f5bf642451c\") " pod="openstack/mariadb-client" Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.309052 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.586432 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.975181 4784 generic.go:334] "Generic (PLEG): container finished" podID="13971e65-859a-4a42-b18f-7f5bf642451c" containerID="6cb1d09301f5dfb7866d4c7a5525b27aeab64395c8bf2d1982b1623985c06182" exitCode=0 Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.975271 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"13971e65-859a-4a42-b18f-7f5bf642451c","Type":"ContainerDied","Data":"6cb1d09301f5dfb7866d4c7a5525b27aeab64395c8bf2d1982b1623985c06182"} Jan 06 09:44:35 crc kubenswrapper[4784]: I0106 09:44:35.975671 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"13971e65-859a-4a42-b18f-7f5bf642451c","Type":"ContainerStarted","Data":"a12a6954671a8a9a96fa5c9aa94e7b3a431b4c540b3328dab7933de91ca2f052"} Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.401007 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.439582 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_13971e65-859a-4a42-b18f-7f5bf642451c/mariadb-client/0.log" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.471300 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.478456 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.537695 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjjng\" (UniqueName: \"kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng\") pod \"13971e65-859a-4a42-b18f-7f5bf642451c\" (UID: \"13971e65-859a-4a42-b18f-7f5bf642451c\") " Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.547713 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng" (OuterVolumeSpecName: "kube-api-access-fjjng") pod "13971e65-859a-4a42-b18f-7f5bf642451c" (UID: "13971e65-859a-4a42-b18f-7f5bf642451c"). InnerVolumeSpecName "kube-api-access-fjjng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.640122 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjjng\" (UniqueName: \"kubernetes.io/projected/13971e65-859a-4a42-b18f-7f5bf642451c-kube-api-access-fjjng\") on node \"crc\" DevicePath \"\"" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.665171 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:37 crc kubenswrapper[4784]: E0106 09:44:37.665673 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13971e65-859a-4a42-b18f-7f5bf642451c" containerName="mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.665706 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="13971e65-859a-4a42-b18f-7f5bf642451c" containerName="mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.666009 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="13971e65-859a-4a42-b18f-7f5bf642451c" containerName="mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.666848 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.678403 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.844160 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8zdr\" (UniqueName: \"kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr\") pod \"mariadb-client\" (UID: \"31d25d93-eb4f-45ab-9bd9-26112a40e1f5\") " pod="openstack/mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.946155 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8zdr\" (UniqueName: \"kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr\") pod \"mariadb-client\" (UID: \"31d25d93-eb4f-45ab-9bd9-26112a40e1f5\") " pod="openstack/mariadb-client" Jan 06 09:44:37 crc kubenswrapper[4784]: I0106 09:44:37.978170 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8zdr\" (UniqueName: \"kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr\") pod \"mariadb-client\" (UID: \"31d25d93-eb4f-45ab-9bd9-26112a40e1f5\") " pod="openstack/mariadb-client" Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.000449 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a12a6954671a8a9a96fa5c9aa94e7b3a431b4c540b3328dab7933de91ca2f052" Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.000611 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.012904 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.037385 4784 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="13971e65-859a-4a42-b18f-7f5bf642451c" podUID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.293373 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:38 crc kubenswrapper[4784]: I0106 09:44:38.329379 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13971e65-859a-4a42-b18f-7f5bf642451c" path="/var/lib/kubelet/pods/13971e65-859a-4a42-b18f-7f5bf642451c/volumes" Jan 06 09:44:39 crc kubenswrapper[4784]: I0106 09:44:39.017923 4784 generic.go:334] "Generic (PLEG): container finished" podID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" containerID="5bb82a00eda52a8be32fae091f328c58983d956b96488dc5dedb46493f5a515a" exitCode=0 Jan 06 09:44:39 crc kubenswrapper[4784]: I0106 09:44:39.018030 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"31d25d93-eb4f-45ab-9bd9-26112a40e1f5","Type":"ContainerDied","Data":"5bb82a00eda52a8be32fae091f328c58983d956b96488dc5dedb46493f5a515a"} Jan 06 09:44:39 crc kubenswrapper[4784]: I0106 09:44:39.018071 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"31d25d93-eb4f-45ab-9bd9-26112a40e1f5","Type":"ContainerStarted","Data":"a9f90651917c72e2b1f22443895ee7b46e203f41dc464c91f1f7226bc7b6a7e4"} Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.451316 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.472709 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_31d25d93-eb4f-45ab-9bd9-26112a40e1f5/mariadb-client/0.log" Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.510976 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.519883 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.628419 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8zdr\" (UniqueName: \"kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr\") pod \"31d25d93-eb4f-45ab-9bd9-26112a40e1f5\" (UID: \"31d25d93-eb4f-45ab-9bd9-26112a40e1f5\") " Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.637216 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr" (OuterVolumeSpecName: "kube-api-access-f8zdr") pod "31d25d93-eb4f-45ab-9bd9-26112a40e1f5" (UID: "31d25d93-eb4f-45ab-9bd9-26112a40e1f5"). InnerVolumeSpecName "kube-api-access-f8zdr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:44:40 crc kubenswrapper[4784]: I0106 09:44:40.731398 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8zdr\" (UniqueName: \"kubernetes.io/projected/31d25d93-eb4f-45ab-9bd9-26112a40e1f5-kube-api-access-f8zdr\") on node \"crc\" DevicePath \"\"" Jan 06 09:44:41 crc kubenswrapper[4784]: I0106 09:44:41.040054 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9f90651917c72e2b1f22443895ee7b46e203f41dc464c91f1f7226bc7b6a7e4" Jan 06 09:44:41 crc kubenswrapper[4784]: I0106 09:44:41.040182 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 06 09:44:41 crc kubenswrapper[4784]: I0106 09:44:41.313029 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:44:41 crc kubenswrapper[4784]: E0106 09:44:41.313358 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:44:42 crc kubenswrapper[4784]: I0106 09:44:42.325528 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" path="/var/lib/kubelet/pods/31d25d93-eb4f-45ab-9bd9-26112a40e1f5/volumes" Jan 06 09:44:54 crc kubenswrapper[4784]: I0106 09:44:54.314148 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f" Jan 06 09:44:55 crc kubenswrapper[4784]: I0106 09:44:55.214517 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e"} Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.158030 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn"] Jan 06 09:45:00 crc kubenswrapper[4784]: E0106 09:45:00.159321 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" containerName="mariadb-client" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.159345 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" containerName="mariadb-client" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.159617 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="31d25d93-eb4f-45ab-9bd9-26112a40e1f5" containerName="mariadb-client" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.160479 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.167565 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn"] Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.169891 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.169929 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.330118 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbptq\" (UniqueName: \"kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.330439 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.330585 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.432687 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.432777 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.433056 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbptq\" (UniqueName: \"kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.434055 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume\") pod 
\"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.440480 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.461711 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbptq\" (UniqueName: \"kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq\") pod \"collect-profiles-29461545-c4vzn\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.492283 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:00 crc kubenswrapper[4784]: I0106 09:45:00.955079 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn"] Jan 06 09:45:01 crc kubenswrapper[4784]: I0106 09:45:01.282895 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" event={"ID":"05868ae7-17ab-4dba-89e2-ee1361ec4e11","Type":"ContainerStarted","Data":"4278ad8e20d5fc8be84df1c466c02a3cec2930095b1d2698c34945403acd3012"} Jan 06 09:45:01 crc kubenswrapper[4784]: I0106 09:45:01.282952 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" event={"ID":"05868ae7-17ab-4dba-89e2-ee1361ec4e11","Type":"ContainerStarted","Data":"f87a0f2bb9417a067292c496901d9205df81b3acafc95432361efeb936fde751"} Jan 06 09:45:01 crc kubenswrapper[4784]: I0106 09:45:01.308358 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" podStartSLOduration=1.308336138 podStartE2EDuration="1.308336138s" podCreationTimestamp="2026-01-06 09:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:01.308286687 +0000 UTC m=+5403.354459554" watchObservedRunningTime="2026-01-06 09:45:01.308336138 +0000 UTC m=+5403.354508975" Jan 06 09:45:02 crc kubenswrapper[4784]: I0106 09:45:02.295374 4784 generic.go:334] "Generic (PLEG): container finished" podID="05868ae7-17ab-4dba-89e2-ee1361ec4e11" containerID="4278ad8e20d5fc8be84df1c466c02a3cec2930095b1d2698c34945403acd3012" exitCode=0 Jan 06 09:45:02 crc kubenswrapper[4784]: I0106 09:45:02.295433 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" event={"ID":"05868ae7-17ab-4dba-89e2-ee1361ec4e11","Type":"ContainerDied","Data":"4278ad8e20d5fc8be84df1c466c02a3cec2930095b1d2698c34945403acd3012"} Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.686711 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.890234 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbptq\" (UniqueName: \"kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq\") pod \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.890438 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume\") pod \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.890582 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume\") pod \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\" (UID: \"05868ae7-17ab-4dba-89e2-ee1361ec4e11\") " Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.891873 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume" (OuterVolumeSpecName: "config-volume") pod "05868ae7-17ab-4dba-89e2-ee1361ec4e11" (UID: "05868ae7-17ab-4dba-89e2-ee1361ec4e11"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.897533 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq" (OuterVolumeSpecName: "kube-api-access-jbptq") pod "05868ae7-17ab-4dba-89e2-ee1361ec4e11" (UID: "05868ae7-17ab-4dba-89e2-ee1361ec4e11"). InnerVolumeSpecName "kube-api-access-jbptq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.900664 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "05868ae7-17ab-4dba-89e2-ee1361ec4e11" (UID: "05868ae7-17ab-4dba-89e2-ee1361ec4e11"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.994140 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbptq\" (UniqueName: \"kubernetes.io/projected/05868ae7-17ab-4dba-89e2-ee1361ec4e11-kube-api-access-jbptq\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.995116 4784 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05868ae7-17ab-4dba-89e2-ee1361ec4e11-config-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:03 crc kubenswrapper[4784]: I0106 09:45:03.995150 4784 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05868ae7-17ab-4dba-89e2-ee1361ec4e11-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:04 crc kubenswrapper[4784]: I0106 09:45:04.315251 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" Jan 06 09:45:04 crc kubenswrapper[4784]: I0106 09:45:04.328524 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29461545-c4vzn" event={"ID":"05868ae7-17ab-4dba-89e2-ee1361ec4e11","Type":"ContainerDied","Data":"f87a0f2bb9417a067292c496901d9205df81b3acafc95432361efeb936fde751"} Jan 06 09:45:04 crc kubenswrapper[4784]: I0106 09:45:04.328604 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f87a0f2bb9417a067292c496901d9205df81b3acafc95432361efeb936fde751" Jan 06 09:45:04 crc kubenswrapper[4784]: I0106 09:45:04.412235 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l"] Jan 06 09:45:04 crc kubenswrapper[4784]: I0106 09:45:04.420528 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29461500-k5j6l"] Jan 06 09:45:06 crc kubenswrapper[4784]: I0106 09:45:06.331493 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca13c898-752c-4644-b976-ca654199d139" path="/var/lib/kubelet/pods/ca13c898-752c-4644-b976-ca654199d139/volumes" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.436736 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 09:45:14 crc kubenswrapper[4784]: E0106 09:45:14.437799 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05868ae7-17ab-4dba-89e2-ee1361ec4e11" containerName="collect-profiles" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.437822 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="05868ae7-17ab-4dba-89e2-ee1361ec4e11" containerName="collect-profiles" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.438114 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="05868ae7-17ab-4dba-89e2-ee1361ec4e11" containerName="collect-profiles" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.439605 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.442576 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.443008 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.443025 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.443255 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-h8qb2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.460692 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.460959 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.493659 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.495297 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.504513 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.505955 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.513332 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.534702 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565477 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565608 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565643 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565727 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565772 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-config\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565820 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565866 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5m2q\" (UniqueName: \"kubernetes.io/projected/87d0ad06-efd9-4785-9dcd-6a215df2068e-kube-api-access-l5m2q\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.565913 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667165 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667215 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667248 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667470 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667530 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrtjg\" (UniqueName: \"kubernetes.io/projected/0428c32c-f26c-4baa-8914-1079f1097a09-kube-api-access-xrtjg\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667596 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667630 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667708 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667739 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667769 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-config\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667816 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667837 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667875 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-config\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667896 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667935 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.667951 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668022 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668074 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-db919c80-25bf-4492-a274-044884f68711\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db919c80-25bf-4492-a274-044884f68711\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668126 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5m2q\" (UniqueName: \"kubernetes.io/projected/87d0ad06-efd9-4785-9dcd-6a215df2068e-kube-api-access-l5m2q\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668156 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5fdc\" (UniqueName: \"kubernetes.io/projected/52b356d6-b34e-4eac-9f70-5138fe492d64-kube-api-access-f5fdc\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668192 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-config\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668235 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668257 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668271 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668335 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668820 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87d0ad06-efd9-4785-9dcd-6a215df2068e-config\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.668895 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " 
pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.675776 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.676675 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.676706 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/64176c02e8418bbee6c9e9f9fe1668d90a6030a37b6fbf9615fcb359c65eb180/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.677758 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.683126 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87d0ad06-efd9-4785-9dcd-6a215df2068e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.690691 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5m2q\" (UniqueName: \"kubernetes.io/projected/87d0ad06-efd9-4785-9dcd-6a215df2068e-kube-api-access-l5m2q\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.758800 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cb8771b6-f6ab-4b17-a779-bcf2498ab77b\") pod \"ovsdbserver-nb-0\" (UID: \"87d0ad06-efd9-4785-9dcd-6a215df2068e\") " pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.769640 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.769850 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.769976 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.770119 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.770234 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrtjg\" (UniqueName: \"kubernetes.io/projected/0428c32c-f26c-4baa-8914-1079f1097a09-kube-api-access-xrtjg\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.770353 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771064 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771122 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.770763 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771312 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-config\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771394 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771472 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 
06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.772444 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.772622 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.773005 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-db919c80-25bf-4492-a274-044884f68711\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db919c80-25bf-4492-a274-044884f68711\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.773093 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5fdc\" (UniqueName: \"kubernetes.io/projected/52b356d6-b34e-4eac-9f70-5138fe492d64-kube-api-access-f5fdc\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.773196 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-config\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.773272 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.772310 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52b356d6-b34e-4eac-9f70-5138fe492d64-config\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.771497 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.773624 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.772795 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdb-rundir\") pod 
\"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.775155 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.775211 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/04672e03c0d7847ceda86e09bc450083d209db7d446f54d3040849560ebebb35/globalmount\"" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.775424 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0428c32c-f26c-4baa-8914-1079f1097a09-config\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.775555 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.776296 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.777238 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.778623 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0428c32c-f26c-4baa-8914-1079f1097a09-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.779071 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/52b356d6-b34e-4eac-9f70-5138fe492d64-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.785484 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.785527 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-db919c80-25bf-4492-a274-044884f68711\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db919c80-25bf-4492-a274-044884f68711\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1685024b7bb03371ed765894a7e19983a3e233c2d7ecc0b5084fc10bccce4529/globalmount\"" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.787904 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.788869 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5fdc\" (UniqueName: \"kubernetes.io/projected/52b356d6-b34e-4eac-9f70-5138fe492d64-kube-api-access-f5fdc\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.791070 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrtjg\" (UniqueName: \"kubernetes.io/projected/0428c32c-f26c-4baa-8914-1079f1097a09-kube-api-access-xrtjg\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.814638 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-be1abbdb-3fab-4f5a-aef5-245d35d37be9\") pod \"ovsdbserver-nb-2\" (UID: \"52b356d6-b34e-4eac-9f70-5138fe492d64\") " pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.825323 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-db919c80-25bf-4492-a274-044884f68711\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db919c80-25bf-4492-a274-044884f68711\") pod \"ovsdbserver-nb-1\" (UID: \"0428c32c-f26c-4baa-8914-1079f1097a09\") " pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:14 crc kubenswrapper[4784]: I0106 09:45:14.843846 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:15 crc kubenswrapper[4784]: I0106 09:45:15.123857 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:15 crc kubenswrapper[4784]: I0106 09:45:15.391397 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 06 09:45:15 crc kubenswrapper[4784]: I0106 09:45:15.427420 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87d0ad06-efd9-4785-9dcd-6a215df2068e","Type":"ContainerStarted","Data":"e59fe6c750a9867558fe5e1244303e8d69418dc3b4b4ea762ffd3656c9b1e54f"} Jan 06 09:45:15 crc kubenswrapper[4784]: I0106 09:45:15.663125 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.042198 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.043859 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.046576 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.046665 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.046603 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ddznm" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.051799 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.067298 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.067352 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.069085 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.092959 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.094292 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.136010 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.141875 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193705 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193764 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjrm7\" (UniqueName: \"kubernetes.io/projected/672bf4f8-dc22-458b-a552-e60871c86e17-kube-api-access-jjrm7\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193788 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193813 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193840 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq95p\" (UniqueName: \"kubernetes.io/projected/59ff486b-0d07-48c7-bb88-0d13a30eaade-kube-api-access-cq95p\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193862 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-config\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193878 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193894 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193911 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e34cd463-7321-4573-a1d2-c833c5651639\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e34cd463-7321-4573-a1d2-c833c5651639\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193926 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmrrw\" (UniqueName: \"kubernetes.io/projected/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-kube-api-access-cmrrw\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193945 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193962 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193981 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.193997 4784 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194013 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194035 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194048 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194068 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194090 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194112 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194137 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194160 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-config\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194174 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.194193 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-config\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296457 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-config\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296513 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296567 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-config\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296602 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296639 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjrm7\" (UniqueName: \"kubernetes.io/projected/672bf4f8-dc22-458b-a552-e60871c86e17-kube-api-access-jjrm7\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296661 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296691 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296723 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq95p\" (UniqueName: \"kubernetes.io/projected/59ff486b-0d07-48c7-bb88-0d13a30eaade-kube-api-access-cq95p\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " 
pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296749 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-config\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296769 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296792 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296817 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e34cd463-7321-4573-a1d2-c833c5651639\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e34cd463-7321-4573-a1d2-c833c5651639\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296840 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmrrw\" (UniqueName: \"kubernetes.io/projected/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-kube-api-access-cmrrw\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296866 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296887 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296911 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296932 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296956 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.296987 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.297006 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.297033 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.297061 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.297089 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.297124 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.299611 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.300056 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.300748 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc 
kubenswrapper[4784]: I0106 09:45:16.300843 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.301223 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-config\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.301626 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-config\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.301859 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/672bf4f8-dc22-458b-a552-e60871c86e17-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.302027 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.302596 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59ff486b-0d07-48c7-bb88-0d13a30eaade-config\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.304185 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.305378 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.305423 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/02ddff4ec35e80a29986a110a3f4a2abd2141e83115ee72a5febdab2f548d526/globalmount\"" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.305770 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.305905 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e34cd463-7321-4573-a1d2-c833c5651639\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e34cd463-7321-4573-a1d2-c833c5651639\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/044c8082f57f06597b7339336769528a90d86cbe53c521f11d24547c68b7f203/globalmount\"" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.306013 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.306234 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.306276 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f5e50d92d587cab25340eba6746d28c92bfe0a82c1a1a21dd07b7809d047ce18/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.306627 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.306805 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/672bf4f8-dc22-458b-a552-e60871c86e17-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.307501 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.308647 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.310202 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " 
pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.314703 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.314788 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59ff486b-0d07-48c7-bb88-0d13a30eaade-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.322964 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmrrw\" (UniqueName: \"kubernetes.io/projected/6e1b3eaf-d1a3-416d-97f8-3a889e94f595-kube-api-access-cmrrw\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.329186 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjrm7\" (UniqueName: \"kubernetes.io/projected/672bf4f8-dc22-458b-a552-e60871c86e17-kube-api-access-jjrm7\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.331507 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq95p\" (UniqueName: \"kubernetes.io/projected/59ff486b-0d07-48c7-bb88-0d13a30eaade-kube-api-access-cq95p\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.353325 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b580d2bd-e19f-4d83-89b2-48582bb28807\") pod \"ovsdbserver-sb-0\" (UID: \"672bf4f8-dc22-458b-a552-e60871c86e17\") " pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.355354 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-82ffe6a0-6fd2-442b-b488-f9203a983eb1\") pod \"ovsdbserver-sb-2\" (UID: \"59ff486b-0d07-48c7-bb88-0d13a30eaade\") " pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.366809 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e34cd463-7321-4573-a1d2-c833c5651639\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e34cd463-7321-4573-a1d2-c833c5651639\") pod \"ovsdbserver-sb-1\" (UID: \"6e1b3eaf-d1a3-416d-97f8-3a889e94f595\") " pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.415572 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.427375 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.441641 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.447937 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"0428c32c-f26c-4baa-8914-1079f1097a09","Type":"ContainerStarted","Data":"b18eeee9e29fe2fdb4846c7e3987c5ba0f0c29867611b4c02bf36cf417ee7401"} Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.448007 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"0428c32c-f26c-4baa-8914-1079f1097a09","Type":"ContainerStarted","Data":"582f85b52f35c314716177a1fb1a2091fb58caf07d94a2c829f0256c5b74e0d7"} Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.448045 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"0428c32c-f26c-4baa-8914-1079f1097a09","Type":"ContainerStarted","Data":"a30ccf94437906d526c1c6faa9a0212f166496e078330eb1404f37e8c9a0fc15"} Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.460411 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87d0ad06-efd9-4785-9dcd-6a215df2068e","Type":"ContainerStarted","Data":"63a97ab142456f2cd75d88772271a98b286290f0af9f9caa32e6df81b4f54d18"} Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.460776 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87d0ad06-efd9-4785-9dcd-6a215df2068e","Type":"ContainerStarted","Data":"7431b6ab8f4ea41311635eee89b9cd62cdeb37fe539edb5399b9d57cc1e0d311"} Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.492465 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.492379734 podStartE2EDuration="3.492379734s" podCreationTimestamp="2026-01-06 09:45:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:16.481148815 +0000 UTC m=+5418.527321652" watchObservedRunningTime="2026-01-06 09:45:16.492379734 +0000 UTC m=+5418.538552611" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.519813 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.528316 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.528290397 podStartE2EDuration="3.528290397s" podCreationTimestamp="2026-01-06 09:45:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:16.517360148 +0000 UTC m=+5418.563532985" watchObservedRunningTime="2026-01-06 09:45:16.528290397 +0000 UTC m=+5418.574463264" Jan 06 09:45:16 crc kubenswrapper[4784]: I0106 09:45:16.997253 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 06 09:45:17 crc kubenswrapper[4784]: W0106 09:45:17.016500 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod672bf4f8_dc22_458b_a552_e60871c86e17.slice/crio-480548539c2de8c4d1dee06545127a5a4ec2605a0ee6a808b86280b1b9db3960 WatchSource:0}: Error finding container 480548539c2de8c4d1dee06545127a5a4ec2605a0ee6a808b86280b1b9db3960: Status 404 returned error can't find the container with id 480548539c2de8c4d1dee06545127a5a4ec2605a0ee6a808b86280b1b9db3960 Jan 06 09:45:17 crc 
kubenswrapper[4784]: I0106 09:45:17.121319 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.468207 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"672bf4f8-dc22-458b-a552-e60871c86e17","Type":"ContainerStarted","Data":"10fbd2bd7e9f56a4808a308b7237188ebbb4b2dda23bc65f9ac079ec7609778d"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.468411 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"672bf4f8-dc22-458b-a552-e60871c86e17","Type":"ContainerStarted","Data":"1f36e88436101c2319edbd28d7853ac8ebe2645ad34c60e6ccdcfdb0147f61e9"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.468498 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"672bf4f8-dc22-458b-a552-e60871c86e17","Type":"ContainerStarted","Data":"480548539c2de8c4d1dee06545127a5a4ec2605a0ee6a808b86280b1b9db3960"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.470244 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"59ff486b-0d07-48c7-bb88-0d13a30eaade","Type":"ContainerStarted","Data":"a34e02e0670b00e5b7870a068dcc6bcc18f9a68330aeda32eb77252dbe659694"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.470352 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"59ff486b-0d07-48c7-bb88-0d13a30eaade","Type":"ContainerStarted","Data":"1d9ab2a2247b612d02a8a1e149e5459ddf721f3314f4c80068656c03c91018cc"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.470414 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"59ff486b-0d07-48c7-bb88-0d13a30eaade","Type":"ContainerStarted","Data":"2b3346435d1a75d83e067347233821402d4b0fed73553346d1af0c938dbad533"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.472754 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"52b356d6-b34e-4eac-9f70-5138fe492d64","Type":"ContainerStarted","Data":"c1b1f6131b4c0882d66f0d9b95aab8abec833bfd0fcad05b779b5795d242c2bc"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.472798 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"52b356d6-b34e-4eac-9f70-5138fe492d64","Type":"ContainerStarted","Data":"ad149442cda26f67df826b5147f5a09d145a68fc1479b2871b86416dee44cdd3"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.472814 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"52b356d6-b34e-4eac-9f70-5138fe492d64","Type":"ContainerStarted","Data":"6bf781657e4a09d28d9ef5911dca8d098c24aa976815beb25e3a3686c11d2ea5"} Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.487053 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=2.487034082 podStartE2EDuration="2.487034082s" podCreationTimestamp="2026-01-06 09:45:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:17.483897786 +0000 UTC m=+5419.530070653" watchObservedRunningTime="2026-01-06 09:45:17.487034082 +0000 UTC m=+5419.533206919" Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.515219 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ovsdbserver-nb-2" podStartSLOduration=4.5151971159999995 podStartE2EDuration="4.515197116s" podCreationTimestamp="2026-01-06 09:45:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:17.507347032 +0000 UTC m=+5419.553519889" watchObservedRunningTime="2026-01-06 09:45:17.515197116 +0000 UTC m=+5419.561369963" Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.531615 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=2.531595245 podStartE2EDuration="2.531595245s" podCreationTimestamp="2026-01-06 09:45:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:17.530337586 +0000 UTC m=+5419.576510443" watchObservedRunningTime="2026-01-06 09:45:17.531595245 +0000 UTC m=+5419.577768082" Jan 06 09:45:17 crc kubenswrapper[4784]: W0106 09:45:17.760815 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e1b3eaf_d1a3_416d_97f8_3a889e94f595.slice/crio-3f9cf9c8fa6b490aa5989ab728bf4958d38a3b241f87d2fa21b9000b124095f8 WatchSource:0}: Error finding container 3f9cf9c8fa6b490aa5989ab728bf4958d38a3b241f87d2fa21b9000b124095f8: Status 404 returned error can't find the container with id 3f9cf9c8fa6b490aa5989ab728bf4958d38a3b241f87d2fa21b9000b124095f8 Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.762338 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.788087 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:17 crc kubenswrapper[4784]: I0106 09:45:17.835896 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.058610 4784 scope.go:117] "RemoveContainer" containerID="68927c9761c11ee126ab423e136b363ffc3052cada487e5f84574ba9649cc7cd" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.087293 4784 scope.go:117] "RemoveContainer" containerID="90e4cec34758895a85b624729094bb131c08429a4c1d73b5981933e599258e20" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.124614 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.180956 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.485141 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"6e1b3eaf-d1a3-416d-97f8-3a889e94f595","Type":"ContainerStarted","Data":"642f4cf366aa7638b771724551cb5392a639a6aeaf683bd6df0cbef07972e177"} Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.485194 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"6e1b3eaf-d1a3-416d-97f8-3a889e94f595","Type":"ContainerStarted","Data":"f2e390786e5220e18d22f3b669b4a3f37fc8458242c257553eafd78a1ed0b0f2"} Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.485206 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" 
event={"ID":"6e1b3eaf-d1a3-416d-97f8-3a889e94f595","Type":"ContainerStarted","Data":"3f9cf9c8fa6b490aa5989ab728bf4958d38a3b241f87d2fa21b9000b124095f8"} Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.486228 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:18 crc kubenswrapper[4784]: I0106 09:45:18.509362 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.5093377 podStartE2EDuration="3.5093377s" podCreationTimestamp="2026-01-06 09:45:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:18.502950502 +0000 UTC m=+5420.549123379" watchObservedRunningTime="2026-01-06 09:45:18.5093377 +0000 UTC m=+5420.555510567" Jan 06 09:45:19 crc kubenswrapper[4784]: I0106 09:45:19.417795 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:19 crc kubenswrapper[4784]: I0106 09:45:19.428528 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:19 crc kubenswrapper[4784]: I0106 09:45:19.443648 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:19 crc kubenswrapper[4784]: I0106 09:45:19.788206 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:19 crc kubenswrapper[4784]: I0106 09:45:19.844502 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.200025 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.581430 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.585435 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.587648 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.598379 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.796200 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x54cw\" (UniqueName: \"kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.796433 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.796578 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.796673 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.844408 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.895203 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.898635 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.898760 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.898885 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x54cw\" (UniqueName: \"kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 
09:45:20.898945 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.899795 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.900438 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.901211 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.916730 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 06 09:45:20 crc kubenswrapper[4784]: I0106 09:45:20.926957 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x54cw\" (UniqueName: \"kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw\") pod \"dnsmasq-dns-8574559fdf-xf75v\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.201230 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.416233 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.431241 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.443093 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.573314 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Jan 06 09:45:21 crc kubenswrapper[4784]: I0106 09:45:21.687504 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:21 crc kubenswrapper[4784]: W0106 09:45:21.702209 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bf5a852_40e6_4b31_9d21_ff058928ea3a.slice/crio-a049803274b0e6506e6d3ad9049b547a926e48156e48e356d1c2bf1b0ae65383 WatchSource:0}: Error finding container a049803274b0e6506e6d3ad9049b547a926e48156e48e356d1c2bf1b0ae65383: Status 404 returned error can't find the container with id a049803274b0e6506e6d3ad9049b547a926e48156e48e356d1c2bf1b0ae65383 Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.491485 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.503127 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.506388 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.538362 4784 generic.go:334] "Generic (PLEG): container finished" podID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerID="cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0" exitCode=0 Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.538454 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" event={"ID":"0bf5a852-40e6-4b31-9d21-ff058928ea3a","Type":"ContainerDied","Data":"cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0"} Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.538497 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" event={"ID":"0bf5a852-40e6-4b31-9d21-ff058928ea3a","Type":"ContainerStarted","Data":"a049803274b0e6506e6d3ad9049b547a926e48156e48e356d1c2bf1b0ae65383"} Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.565050 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.570527 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.616940 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.967785 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 
09:45:22.992916 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:45:22 crc kubenswrapper[4784]: I0106 09:45:22.994104 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.000473 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.019611 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.150222 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.150315 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frk2s\" (UniqueName: \"kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.150357 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.150397 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.150422 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.251798 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.251869 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.251913 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.251969 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frk2s\" (UniqueName: \"kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.252001 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.252929 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.253886 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.254026 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.254151 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.282075 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frk2s\" (UniqueName: \"kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s\") pod \"dnsmasq-dns-6998c99fcf-jlxp5\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.310012 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.552569 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" event={"ID":"0bf5a852-40e6-4b31-9d21-ff058928ea3a","Type":"ContainerStarted","Data":"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae"} Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.569739 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" podStartSLOduration=3.5697017779999998 podStartE2EDuration="3.569701778s" podCreationTimestamp="2026-01-06 09:45:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:23.566889811 +0000 UTC m=+5425.613062648" watchObservedRunningTime="2026-01-06 09:45:23.569701778 +0000 UTC m=+5425.615874615" Jan 06 09:45:23 crc kubenswrapper[4784]: W0106 09:45:23.776848 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77986454_9ec3_49f9_819d_8fe115385022.slice/crio-1f531c27c2a837f0d128e044047cf348816a574cba2165cbfdd55aeed8901343 WatchSource:0}: Error finding container 1f531c27c2a837f0d128e044047cf348816a574cba2165cbfdd55aeed8901343: Status 404 returned error can't find the container with id 1f531c27c2a837f0d128e044047cf348816a574cba2165cbfdd55aeed8901343 Jan 06 09:45:23 crc kubenswrapper[4784]: I0106 09:45:23.779978 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.561122 4784 generic.go:334] "Generic (PLEG): container finished" podID="77986454-9ec3-49f9-819d-8fe115385022" containerID="e3d50610a4b7420b942bf5a326b16a103208128d14cf5441ff150c5a3725c425" exitCode=0 Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.561253 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" event={"ID":"77986454-9ec3-49f9-819d-8fe115385022","Type":"ContainerDied","Data":"e3d50610a4b7420b942bf5a326b16a103208128d14cf5441ff150c5a3725c425"} Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.561610 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" event={"ID":"77986454-9ec3-49f9-819d-8fe115385022","Type":"ContainerStarted","Data":"1f531c27c2a837f0d128e044047cf348816a574cba2165cbfdd55aeed8901343"} Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.561800 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="dnsmasq-dns" containerID="cri-o://3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae" gracePeriod=10 Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.561881 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:24 crc kubenswrapper[4784]: I0106 09:45:24.965781 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.089487 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc\") pod \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.089594 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb\") pod \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.089615 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config\") pod \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.089787 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x54cw\" (UniqueName: \"kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw\") pod \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\" (UID: \"0bf5a852-40e6-4b31-9d21-ff058928ea3a\") " Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.095457 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw" (OuterVolumeSpecName: "kube-api-access-x54cw") pod "0bf5a852-40e6-4b31-9d21-ff058928ea3a" (UID: "0bf5a852-40e6-4b31-9d21-ff058928ea3a"). InnerVolumeSpecName "kube-api-access-x54cw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.130162 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config" (OuterVolumeSpecName: "config") pod "0bf5a852-40e6-4b31-9d21-ff058928ea3a" (UID: "0bf5a852-40e6-4b31-9d21-ff058928ea3a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.139903 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0bf5a852-40e6-4b31-9d21-ff058928ea3a" (UID: "0bf5a852-40e6-4b31-9d21-ff058928ea3a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.142896 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0bf5a852-40e6-4b31-9d21-ff058928ea3a" (UID: "0bf5a852-40e6-4b31-9d21-ff058928ea3a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.191940 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x54cw\" (UniqueName: \"kubernetes.io/projected/0bf5a852-40e6-4b31-9d21-ff058928ea3a-kube-api-access-x54cw\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.192114 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.192186 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.192199 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0bf5a852-40e6-4b31-9d21-ff058928ea3a-config\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.321460 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Jan 06 09:45:25 crc kubenswrapper[4784]: E0106 09:45:25.321907 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="init" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.321926 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="init" Jan 06 09:45:25 crc kubenswrapper[4784]: E0106 09:45:25.321938 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="dnsmasq-dns" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.321968 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="dnsmasq-dns" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.323135 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerName="dnsmasq-dns" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.324950 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.328910 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.334068 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.394958 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.395042 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46n9f\" (UniqueName: \"kubernetes.io/projected/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-kube-api-access-46n9f\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.395121 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.497021 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.497349 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.497532 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46n9f\" (UniqueName: \"kubernetes.io/projected/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-kube-api-access-46n9f\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.502197 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.502688 4784 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.502718 4784 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e520eca6f45df5bc7c58be4f66a216d6fc7bb7d7a57446ab032d52ca4864eb2a/globalmount\"" pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.514339 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46n9f\" (UniqueName: \"kubernetes.io/projected/14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8-kube-api-access-46n9f\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.531328 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07ae46ce-b170-4cde-bad5-5c22fc65a3fd\") pod \"ovn-copy-data\" (UID: \"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8\") " pod="openstack/ovn-copy-data" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.569261 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" event={"ID":"77986454-9ec3-49f9-819d-8fe115385022","Type":"ContainerStarted","Data":"b895f2dcf6e239630b6df11af7123c08d27912522f617e6c26264507b14f168a"} Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.570460 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.576685 4784 generic.go:334] "Generic (PLEG): container finished" podID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" containerID="3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae" exitCode=0 Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.576729 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" event={"ID":"0bf5a852-40e6-4b31-9d21-ff058928ea3a","Type":"ContainerDied","Data":"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae"} Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.576757 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" event={"ID":"0bf5a852-40e6-4b31-9d21-ff058928ea3a","Type":"ContainerDied","Data":"a049803274b0e6506e6d3ad9049b547a926e48156e48e356d1c2bf1b0ae65383"} Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.576775 4784 scope.go:117] "RemoveContainer" containerID="3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.577340 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8574559fdf-xf75v" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.597693 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" podStartSLOduration=3.597676555 podStartE2EDuration="3.597676555s" podCreationTimestamp="2026-01-06 09:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:25.591340619 +0000 UTC m=+5427.637513476" watchObservedRunningTime="2026-01-06 09:45:25.597676555 +0000 UTC m=+5427.643849382" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.631075 4784 scope.go:117] "RemoveContainer" containerID="cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.635344 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.646924 4784 scope.go:117] "RemoveContainer" containerID="3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae" Jan 06 09:45:25 crc kubenswrapper[4784]: E0106 09:45:25.647723 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae\": container with ID starting with 3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae not found: ID does not exist" containerID="3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.647775 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae"} err="failed to get container status \"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae\": rpc error: code = NotFound desc = could not find container \"3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae\": container with ID starting with 3b893cfca13b62d2c778e63ca38dc8e2db1c64ba0a7a06bbb1c44404752511ae not found: ID does not exist" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.647807 4784 scope.go:117] "RemoveContainer" containerID="cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0" Jan 06 09:45:25 crc kubenswrapper[4784]: E0106 09:45:25.648111 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0\": container with ID starting with cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0 not found: ID does not exist" containerID="cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.648139 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0"} err="failed to get container status \"cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0\": rpc error: code = NotFound desc = could not find container \"cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0\": container with ID starting with cc1ff09bcca914aae4dcf7b04776b134419a1f07798c3dc27d3ab5b9da509ce0 not found: ID does not exist" Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.648825 4784 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/dnsmasq-dns-8574559fdf-xf75v"] Jan 06 09:45:25 crc kubenswrapper[4784]: I0106 09:45:25.652164 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Jan 06 09:45:26 crc kubenswrapper[4784]: I0106 09:45:26.263493 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 06 09:45:26 crc kubenswrapper[4784]: W0106 09:45:26.271848 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14658b5d_ac33_4c8e_a5a2_3166b7e1a5b8.slice/crio-09d8084892814b173c6d2483e9a81c36730f0e8c290423c53871f57bb211a623 WatchSource:0}: Error finding container 09d8084892814b173c6d2483e9a81c36730f0e8c290423c53871f57bb211a623: Status 404 returned error can't find the container with id 09d8084892814b173c6d2483e9a81c36730f0e8c290423c53871f57bb211a623 Jan 06 09:45:26 crc kubenswrapper[4784]: I0106 09:45:26.274491 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 09:45:26 crc kubenswrapper[4784]: I0106 09:45:26.323157 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bf5a852-40e6-4b31-9d21-ff058928ea3a" path="/var/lib/kubelet/pods/0bf5a852-40e6-4b31-9d21-ff058928ea3a/volumes" Jan 06 09:45:26 crc kubenswrapper[4784]: I0106 09:45:26.588883 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8","Type":"ContainerStarted","Data":"09d8084892814b173c6d2483e9a81c36730f0e8c290423c53871f57bb211a623"} Jan 06 09:45:27 crc kubenswrapper[4784]: I0106 09:45:27.600627 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8","Type":"ContainerStarted","Data":"1fea5937b721abd20765b692fe444101356ad1ef98241d0a135dcb05cd256276"} Jan 06 09:45:27 crc kubenswrapper[4784]: I0106 09:45:27.623992 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.143775579 podStartE2EDuration="3.623976772s" podCreationTimestamp="2026-01-06 09:45:24 +0000 UTC" firstStartedPulling="2026-01-06 09:45:26.274149147 +0000 UTC m=+5428.320322004" lastFinishedPulling="2026-01-06 09:45:26.75435036 +0000 UTC m=+5428.800523197" observedRunningTime="2026-01-06 09:45:27.618823631 +0000 UTC m=+5429.664996468" watchObservedRunningTime="2026-01-06 09:45:27.623976772 +0000 UTC m=+5429.670149609" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.311777 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.410744 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.411057 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="dnsmasq-dns" containerID="cri-o://5a888016b08cc799974df137e01621e2fa6bd011199939cd5d205596e1f4dfdc" gracePeriod=10 Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.655762 4784 generic.go:334] "Generic (PLEG): container finished" podID="584dcda2-5983-4606-a054-b09385d52977" containerID="5a888016b08cc799974df137e01621e2fa6bd011199939cd5d205596e1f4dfdc" exitCode=0 Jan 06 09:45:33 crc kubenswrapper[4784]: 
I0106 09:45:33.655950 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" event={"ID":"584dcda2-5983-4606-a054-b09385d52977","Type":"ContainerDied","Data":"5a888016b08cc799974df137e01621e2fa6bd011199939cd5d205596e1f4dfdc"} Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.835335 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.858254 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 06 09:45:33 crc kubenswrapper[4784]: E0106 09:45:33.858625 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="init" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.858640 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="init" Jan 06 09:45:33 crc kubenswrapper[4784]: E0106 09:45:33.858667 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="dnsmasq-dns" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.858673 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="dnsmasq-dns" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.858813 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="584dcda2-5983-4606-a054-b09385d52977" containerName="dnsmasq-dns" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.859607 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.868709 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.868980 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-zhp4b" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.869927 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.870197 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.896521 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982029 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc\") pod \"584dcda2-5983-4606-a054-b09385d52977\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982138 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config\") pod \"584dcda2-5983-4606-a054-b09385d52977\" (UID: \"584dcda2-5983-4606-a054-b09385d52977\") " Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982163 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc7l2\" (UniqueName: \"kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2\") pod \"584dcda2-5983-4606-a054-b09385d52977\" 
(UID: \"584dcda2-5983-4606-a054-b09385d52977\") " Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982429 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-scripts\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982474 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-config\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982505 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb7mz\" (UniqueName: \"kubernetes.io/projected/0c462ba2-3550-4c14-a898-927acee7d1bc-kube-api-access-jb7mz\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982570 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982634 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982658 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.982723 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:33 crc kubenswrapper[4784]: I0106 09:45:33.989117 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2" (OuterVolumeSpecName: "kube-api-access-tc7l2") pod "584dcda2-5983-4606-a054-b09385d52977" (UID: "584dcda2-5983-4606-a054-b09385d52977"). InnerVolumeSpecName "kube-api-access-tc7l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.019021 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config" (OuterVolumeSpecName: "config") pod "584dcda2-5983-4606-a054-b09385d52977" (UID: "584dcda2-5983-4606-a054-b09385d52977"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.019345 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "584dcda2-5983-4606-a054-b09385d52977" (UID: "584dcda2-5983-4606-a054-b09385d52977"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084156 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084211 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084244 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084268 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-scripts\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084302 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-config\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084335 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb7mz\" (UniqueName: \"kubernetes.io/projected/0c462ba2-3550-4c14-a898-927acee7d1bc-kube-api-access-jb7mz\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084359 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084448 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084463 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584dcda2-5983-4606-a054-b09385d52977-config\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084473 4784 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc7l2\" (UniqueName: \"kubernetes.io/projected/584dcda2-5983-4606-a054-b09385d52977-kube-api-access-tc7l2\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.084709 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.085159 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-config\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.085173 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c462ba2-3550-4c14-a898-927acee7d1bc-scripts\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.088294 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.088334 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.088860 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c462ba2-3550-4c14-a898-927acee7d1bc-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.099841 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb7mz\" (UniqueName: \"kubernetes.io/projected/0c462ba2-3550-4c14-a898-927acee7d1bc-kube-api-access-jb7mz\") pod \"ovn-northd-0\" (UID: \"0c462ba2-3550-4c14-a898-927acee7d1bc\") " pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.177589 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.620179 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.671630 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" event={"ID":"584dcda2-5983-4606-a054-b09385d52977","Type":"ContainerDied","Data":"385d85d750b9e8ab6f03be97f23ef4d2563160190e7651a331b7eda54058b657"} Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.671785 4784 scope.go:117] "RemoveContainer" containerID="5a888016b08cc799974df137e01621e2fa6bd011199939cd5d205596e1f4dfdc" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.671969 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-tfnwh" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.674340 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0c462ba2-3550-4c14-a898-927acee7d1bc","Type":"ContainerStarted","Data":"ac2098d30e32ad60970ea36f466ba1b754a84a2c4ff91ae8b44d018da5814a70"} Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.781735 4784 scope.go:117] "RemoveContainer" containerID="d6ef3442cba20c4f18d132e55fc8ac02ddd72e01ac6c20a75bb2c835a6bb0b4d" Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.809073 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:45:34 crc kubenswrapper[4784]: I0106 09:45:34.817303 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-tfnwh"] Jan 06 09:45:35 crc kubenswrapper[4784]: I0106 09:45:35.686941 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0c462ba2-3550-4c14-a898-927acee7d1bc","Type":"ContainerStarted","Data":"624115438d7f6bed9606a48ce03c06cac9e66535e061e807d2664db512951ed5"} Jan 06 09:45:35 crc kubenswrapper[4784]: I0106 09:45:35.687475 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 06 09:45:35 crc kubenswrapper[4784]: I0106 09:45:35.687491 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0c462ba2-3550-4c14-a898-927acee7d1bc","Type":"ContainerStarted","Data":"06ae7fbfef7ee01fedf89ca0f0f678651110bd1128cb5720cda97c0657730cef"} Jan 06 09:45:35 crc kubenswrapper[4784]: I0106 09:45:35.723099 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.723077346 podStartE2EDuration="2.723077346s" podCreationTimestamp="2026-01-06 09:45:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:35.722193758 +0000 UTC m=+5437.768366615" watchObservedRunningTime="2026-01-06 09:45:35.723077346 +0000 UTC m=+5437.769250183" Jan 06 09:45:36 crc kubenswrapper[4784]: I0106 09:45:36.345382 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="584dcda2-5983-4606-a054-b09385d52977" path="/var/lib/kubelet/pods/584dcda2-5983-4606-a054-b09385d52977/volumes" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.581410 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-9hlnc"] Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.582752 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.593297 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9hlnc"] Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.599326 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b9e3-account-create-update-fc6gj"] Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.600363 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.601811 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.612522 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b9e3-account-create-update-fc6gj"] Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.728416 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqcbv\" (UniqueName: \"kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.728502 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.728661 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnqsg\" (UniqueName: \"kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.728865 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.830819 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.830898 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqcbv\" (UniqueName: \"kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.830948 4784 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.830987 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnqsg\" (UniqueName: \"kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.832432 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.832443 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.848784 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnqsg\" (UniqueName: \"kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg\") pod \"keystone-b9e3-account-create-update-fc6gj\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.849786 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqcbv\" (UniqueName: \"kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv\") pod \"keystone-db-create-9hlnc\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.898420 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:40 crc kubenswrapper[4784]: I0106 09:45:40.912334 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.449726 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b9e3-account-create-update-fc6gj"] Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.463314 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9hlnc"] Jan 06 09:45:41 crc kubenswrapper[4784]: W0106 09:45:41.467919 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1a0eaf0_8041_4652_9c05_ab5208875034.slice/crio-8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11 WatchSource:0}: Error finding container 8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11: Status 404 returned error can't find the container with id 8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11 Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.755904 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b9e3-account-create-update-fc6gj" event={"ID":"aaf0679d-d8f7-4e6b-bef2-23b9337d691d","Type":"ContainerStarted","Data":"a03a331c335015894d1221ef9a5a866b612f27ef8c20f30d3958af0f199feea5"} Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.756967 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b9e3-account-create-update-fc6gj" event={"ID":"aaf0679d-d8f7-4e6b-bef2-23b9337d691d","Type":"ContainerStarted","Data":"01d0606f31c670e8c1b8e61f2c82c3d03c410d619c35a1768bc71bd9368f64ae"} Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.758016 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9hlnc" event={"ID":"f1a0eaf0-8041-4652-9c05-ab5208875034","Type":"ContainerStarted","Data":"3df8a17e3455c7d1560bcf7c1ed5c662c4b9e10992f786c1f5bd2bf2a1993fcd"} Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.758042 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9hlnc" event={"ID":"f1a0eaf0-8041-4652-9c05-ab5208875034","Type":"ContainerStarted","Data":"8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11"} Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.781997 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-b9e3-account-create-update-fc6gj" podStartSLOduration=1.7819769829999998 podStartE2EDuration="1.781976983s" podCreationTimestamp="2026-01-06 09:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:41.775697989 +0000 UTC m=+5443.821870856" watchObservedRunningTime="2026-01-06 09:45:41.781976983 +0000 UTC m=+5443.828149820" Jan 06 09:45:41 crc kubenswrapper[4784]: I0106 09:45:41.800957 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-9hlnc" podStartSLOduration=1.800934381 podStartE2EDuration="1.800934381s" podCreationTimestamp="2026-01-06 09:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:41.794494671 +0000 UTC m=+5443.840667538" watchObservedRunningTime="2026-01-06 09:45:41.800934381 +0000 UTC m=+5443.847107258" Jan 06 09:45:42 crc kubenswrapper[4784]: I0106 09:45:42.775261 4784 generic.go:334] "Generic (PLEG): container finished" 
podID="f1a0eaf0-8041-4652-9c05-ab5208875034" containerID="3df8a17e3455c7d1560bcf7c1ed5c662c4b9e10992f786c1f5bd2bf2a1993fcd" exitCode=0 Jan 06 09:45:42 crc kubenswrapper[4784]: I0106 09:45:42.775699 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9hlnc" event={"ID":"f1a0eaf0-8041-4652-9c05-ab5208875034","Type":"ContainerDied","Data":"3df8a17e3455c7d1560bcf7c1ed5c662c4b9e10992f786c1f5bd2bf2a1993fcd"} Jan 06 09:45:42 crc kubenswrapper[4784]: I0106 09:45:42.778414 4784 generic.go:334] "Generic (PLEG): container finished" podID="aaf0679d-d8f7-4e6b-bef2-23b9337d691d" containerID="a03a331c335015894d1221ef9a5a866b612f27ef8c20f30d3958af0f199feea5" exitCode=0 Jan 06 09:45:42 crc kubenswrapper[4784]: I0106 09:45:42.778455 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b9e3-account-create-update-fc6gj" event={"ID":"aaf0679d-d8f7-4e6b-bef2-23b9337d691d","Type":"ContainerDied","Data":"a03a331c335015894d1221ef9a5a866b612f27ef8c20f30d3958af0f199feea5"} Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.262886 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.321224 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.332504 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.497105 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts\") pod \"f1a0eaf0-8041-4652-9c05-ab5208875034\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.497191 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts\") pod \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.497332 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqcbv\" (UniqueName: \"kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv\") pod \"f1a0eaf0-8041-4652-9c05-ab5208875034\" (UID: \"f1a0eaf0-8041-4652-9c05-ab5208875034\") " Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.497393 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnqsg\" (UniqueName: \"kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg\") pod \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\" (UID: \"aaf0679d-d8f7-4e6b-bef2-23b9337d691d\") " Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.498057 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aaf0679d-d8f7-4e6b-bef2-23b9337d691d" (UID: "aaf0679d-d8f7-4e6b-bef2-23b9337d691d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.499871 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f1a0eaf0-8041-4652-9c05-ab5208875034" (UID: "f1a0eaf0-8041-4652-9c05-ab5208875034"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.504754 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg" (OuterVolumeSpecName: "kube-api-access-xnqsg") pod "aaf0679d-d8f7-4e6b-bef2-23b9337d691d" (UID: "aaf0679d-d8f7-4e6b-bef2-23b9337d691d"). InnerVolumeSpecName "kube-api-access-xnqsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.504806 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv" (OuterVolumeSpecName: "kube-api-access-nqcbv") pod "f1a0eaf0-8041-4652-9c05-ab5208875034" (UID: "f1a0eaf0-8041-4652-9c05-ab5208875034"). InnerVolumeSpecName "kube-api-access-nqcbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.599106 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnqsg\" (UniqueName: \"kubernetes.io/projected/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-kube-api-access-xnqsg\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.599141 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1a0eaf0-8041-4652-9c05-ab5208875034-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.599153 4784 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aaf0679d-d8f7-4e6b-bef2-23b9337d691d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.599161 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqcbv\" (UniqueName: \"kubernetes.io/projected/f1a0eaf0-8041-4652-9c05-ab5208875034-kube-api-access-nqcbv\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.812798 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-9hlnc" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.813169 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9hlnc" event={"ID":"f1a0eaf0-8041-4652-9c05-ab5208875034","Type":"ContainerDied","Data":"8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11"} Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.813210 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ae9d493470f813a584894c67c276321cffd6cc28ab36994e8a9e2a55c48bb11" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.821481 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b9e3-account-create-update-fc6gj" event={"ID":"aaf0679d-d8f7-4e6b-bef2-23b9337d691d","Type":"ContainerDied","Data":"01d0606f31c670e8c1b8e61f2c82c3d03c410d619c35a1768bc71bd9368f64ae"} Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.821533 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01d0606f31c670e8c1b8e61f2c82c3d03c410d619c35a1768bc71bd9368f64ae" Jan 06 09:45:44 crc kubenswrapper[4784]: I0106 09:45:44.821682 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b9e3-account-create-update-fc6gj" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.097665 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-p8sgr"] Jan 06 09:45:46 crc kubenswrapper[4784]: E0106 09:45:46.098246 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a0eaf0-8041-4652-9c05-ab5208875034" containerName="mariadb-database-create" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.098260 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a0eaf0-8041-4652-9c05-ab5208875034" containerName="mariadb-database-create" Jan 06 09:45:46 crc kubenswrapper[4784]: E0106 09:45:46.098274 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf0679d-d8f7-4e6b-bef2-23b9337d691d" containerName="mariadb-account-create-update" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.098280 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf0679d-d8f7-4e6b-bef2-23b9337d691d" containerName="mariadb-account-create-update" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.098437 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1a0eaf0-8041-4652-9c05-ab5208875034" containerName="mariadb-database-create" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.098455 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaf0679d-d8f7-4e6b-bef2-23b9337d691d" containerName="mariadb-account-create-update" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.099034 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.101521 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.101666 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.101709 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4c25j" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.101983 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.114865 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-p8sgr"] Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.235252 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.235446 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.235486 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wng7\" (UniqueName: \"kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.337889 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.338058 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.338100 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wng7\" (UniqueName: \"kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.346649 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " 
pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.346829 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.366111 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wng7\" (UniqueName: \"kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7\") pod \"keystone-db-sync-p8sgr\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.414059 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:46 crc kubenswrapper[4784]: W0106 09:45:46.753123 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcb83c22_8916_4c1c_976f_51c157f9b7db.slice/crio-cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5 WatchSource:0}: Error finding container cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5: Status 404 returned error can't find the container with id cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5 Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.762360 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-p8sgr"] Jan 06 09:45:46 crc kubenswrapper[4784]: I0106 09:45:46.840226 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p8sgr" event={"ID":"bcb83c22-8916-4c1c-976f-51c157f9b7db","Type":"ContainerStarted","Data":"cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5"} Jan 06 09:45:47 crc kubenswrapper[4784]: I0106 09:45:47.854216 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p8sgr" event={"ID":"bcb83c22-8916-4c1c-976f-51c157f9b7db","Type":"ContainerStarted","Data":"b2346999dd29189988a2ad886abf3f4d3c93e9133994cf6dff37a7341c04a39c"} Jan 06 09:45:47 crc kubenswrapper[4784]: I0106 09:45:47.885322 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-p8sgr" podStartSLOduration=1.885292548 podStartE2EDuration="1.885292548s" podCreationTimestamp="2026-01-06 09:45:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:47.882595564 +0000 UTC m=+5449.928768431" watchObservedRunningTime="2026-01-06 09:45:47.885292548 +0000 UTC m=+5449.931465415" Jan 06 09:45:48 crc kubenswrapper[4784]: I0106 09:45:48.866976 4784 generic.go:334] "Generic (PLEG): container finished" podID="bcb83c22-8916-4c1c-976f-51c157f9b7db" containerID="b2346999dd29189988a2ad886abf3f4d3c93e9133994cf6dff37a7341c04a39c" exitCode=0 Jan 06 09:45:48 crc kubenswrapper[4784]: I0106 09:45:48.867280 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p8sgr" event={"ID":"bcb83c22-8916-4c1c-976f-51c157f9b7db","Type":"ContainerDied","Data":"b2346999dd29189988a2ad886abf3f4d3c93e9133994cf6dff37a7341c04a39c"} Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.330045 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.427934 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data\") pod \"bcb83c22-8916-4c1c-976f-51c157f9b7db\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.428194 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wng7\" (UniqueName: \"kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7\") pod \"bcb83c22-8916-4c1c-976f-51c157f9b7db\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.428246 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle\") pod \"bcb83c22-8916-4c1c-976f-51c157f9b7db\" (UID: \"bcb83c22-8916-4c1c-976f-51c157f9b7db\") " Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.434992 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7" (OuterVolumeSpecName: "kube-api-access-9wng7") pod "bcb83c22-8916-4c1c-976f-51c157f9b7db" (UID: "bcb83c22-8916-4c1c-976f-51c157f9b7db"). InnerVolumeSpecName "kube-api-access-9wng7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.465695 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bcb83c22-8916-4c1c-976f-51c157f9b7db" (UID: "bcb83c22-8916-4c1c-976f-51c157f9b7db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.499764 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data" (OuterVolumeSpecName: "config-data") pod "bcb83c22-8916-4c1c-976f-51c157f9b7db" (UID: "bcb83c22-8916-4c1c-976f-51c157f9b7db"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.530144 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wng7\" (UniqueName: \"kubernetes.io/projected/bcb83c22-8916-4c1c-976f-51c157f9b7db-kube-api-access-9wng7\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.530192 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.530209 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcb83c22-8916-4c1c-976f-51c157f9b7db-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.889472 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-p8sgr" event={"ID":"bcb83c22-8916-4c1c-976f-51c157f9b7db","Type":"ContainerDied","Data":"cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5"} Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.889581 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbb8bb438a8424120ca99cc5aea4ff824748481a678e96ff65e7d04ab109fca5" Jan 06 09:45:50 crc kubenswrapper[4784]: I0106 09:45:50.889657 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-p8sgr" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.176138 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75f555c9df-f87gg"] Jan 06 09:45:51 crc kubenswrapper[4784]: E0106 09:45:51.176969 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcb83c22-8916-4c1c-976f-51c157f9b7db" containerName="keystone-db-sync" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.176998 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcb83c22-8916-4c1c-976f-51c157f9b7db" containerName="keystone-db-sync" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.177205 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcb83c22-8916-4c1c-976f-51c157f9b7db" containerName="keystone-db-sync" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.178220 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.185866 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f555c9df-f87gg"] Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.201315 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l877t"] Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.202572 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.204896 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.204981 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.205296 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.205369 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.205492 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4c25j" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.216006 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l877t"] Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.245695 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-nb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.245752 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt277\" (UniqueName: \"kubernetes.io/projected/a66f9730-85f2-42cc-8c48-765e2b138afe-kube-api-access-lt277\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.245778 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-dns-svc\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.245808 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-sb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.245884 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-config\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347566 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt277\" (UniqueName: \"kubernetes.io/projected/a66f9730-85f2-42cc-8c48-765e2b138afe-kube-api-access-lt277\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347641 4784 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347664 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-dns-svc\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347698 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-sb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347778 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq7jt\" (UniqueName: \"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.347935 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-config\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348003 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348021 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348100 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348163 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348222 4784 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-nb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348581 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-sb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.348649 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-dns-svc\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.349050 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-ovsdbserver-nb\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.349141 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a66f9730-85f2-42cc-8c48-765e2b138afe-config\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.382927 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt277\" (UniqueName: \"kubernetes.io/projected/a66f9730-85f2-42cc-8c48-765e2b138afe-kube-api-access-lt277\") pod \"dnsmasq-dns-75f555c9df-f87gg\" (UID: \"a66f9730-85f2-42cc-8c48-765e2b138afe\") " pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457291 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457333 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457371 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457399 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457485 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.457617 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq7jt\" (UniqueName: \"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.460688 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.460965 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.462203 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.462805 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.464467 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.476852 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq7jt\" (UniqueName: \"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt\") pod \"keystone-bootstrap-l877t\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.512121 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.527248 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:51 crc kubenswrapper[4784]: I0106 09:45:51.956388 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f555c9df-f87gg"] Jan 06 09:45:51 crc kubenswrapper[4784]: W0106 09:45:51.970054 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda66f9730_85f2_42cc_8c48_765e2b138afe.slice/crio-9ce0576c9089e90bc5e3392c7e251d22c6657e60c3bec482f09b152a18d72ef7 WatchSource:0}: Error finding container 9ce0576c9089e90bc5e3392c7e251d22c6657e60c3bec482f09b152a18d72ef7: Status 404 returned error can't find the container with id 9ce0576c9089e90bc5e3392c7e251d22c6657e60c3bec482f09b152a18d72ef7 Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.033215 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l877t"] Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.915023 4784 generic.go:334] "Generic (PLEG): container finished" podID="a66f9730-85f2-42cc-8c48-765e2b138afe" containerID="1eeef11a1b7c6595d4fe58471338978df842a2777dd83353b191679a26dbec6e" exitCode=0 Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.915107 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" event={"ID":"a66f9730-85f2-42cc-8c48-765e2b138afe","Type":"ContainerDied","Data":"1eeef11a1b7c6595d4fe58471338978df842a2777dd83353b191679a26dbec6e"} Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.915433 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" event={"ID":"a66f9730-85f2-42cc-8c48-765e2b138afe","Type":"ContainerStarted","Data":"9ce0576c9089e90bc5e3392c7e251d22c6657e60c3bec482f09b152a18d72ef7"} Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.918497 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l877t" event={"ID":"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7","Type":"ContainerStarted","Data":"9f53d41879fc8c8d25e30863005e67a16ff9acc68b3639929c3482bb4c962a34"} Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.918596 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l877t" event={"ID":"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7","Type":"ContainerStarted","Data":"f74db32b279af7e1a0ea2e408e86346f88e62cc39511cbc95ce153ddccc06dca"} Jan 06 09:45:52 crc kubenswrapper[4784]: I0106 09:45:52.985370 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l877t" podStartSLOduration=1.985353308 podStartE2EDuration="1.985353308s" podCreationTimestamp="2026-01-06 09:45:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:52.975009767 +0000 UTC m=+5455.021182614" watchObservedRunningTime="2026-01-06 09:45:52.985353308 +0000 UTC m=+5455.031526145" Jan 06 09:45:53 crc kubenswrapper[4784]: I0106 09:45:53.930667 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" event={"ID":"a66f9730-85f2-42cc-8c48-765e2b138afe","Type":"ContainerStarted","Data":"977fe18acff8d4bd4616dfeab72528c1aee824012133b1b6ccf1011f3e5dfae9"} Jan 06 09:45:53 crc kubenswrapper[4784]: I0106 09:45:53.933147 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:45:53 crc kubenswrapper[4784]: I0106 
09:45:53.963176 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" podStartSLOduration=2.963155044 podStartE2EDuration="2.963155044s" podCreationTimestamp="2026-01-06 09:45:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:45:53.953600448 +0000 UTC m=+5455.999773305" watchObservedRunningTime="2026-01-06 09:45:53.963155044 +0000 UTC m=+5456.009327891" Jan 06 09:45:55 crc kubenswrapper[4784]: I0106 09:45:55.953186 4784 generic.go:334] "Generic (PLEG): container finished" podID="07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" containerID="9f53d41879fc8c8d25e30863005e67a16ff9acc68b3639929c3482bb4c962a34" exitCode=0 Jan 06 09:45:55 crc kubenswrapper[4784]: I0106 09:45:55.953587 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l877t" event={"ID":"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7","Type":"ContainerDied","Data":"9f53d41879fc8c8d25e30863005e67a16ff9acc68b3639929c3482bb4c962a34"} Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.364718 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482306 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482353 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482483 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482536 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482644 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.482682 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq7jt\" (UniqueName: \"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt\") pod \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\" (UID: \"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7\") " Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.488673 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt" (OuterVolumeSpecName: "kube-api-access-tq7jt") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "kube-api-access-tq7jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.488836 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.491503 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.492296 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts" (OuterVolumeSpecName: "scripts") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.522866 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data" (OuterVolumeSpecName: "config-data") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.535502 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" (UID: "07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.585536 4784 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.585930 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq7jt\" (UniqueName: \"kubernetes.io/projected/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-kube-api-access-tq7jt\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.586079 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.586210 4784 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.586325 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.586462 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.980100 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l877t" event={"ID":"07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7","Type":"ContainerDied","Data":"f74db32b279af7e1a0ea2e408e86346f88e62cc39511cbc95ce153ddccc06dca"} Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.980163 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f74db32b279af7e1a0ea2e408e86346f88e62cc39511cbc95ce153ddccc06dca" Jan 06 09:45:57 crc kubenswrapper[4784]: I0106 09:45:57.980191 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l877t" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.097604 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l877t"] Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.111940 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l877t"] Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.171118 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-pcbzh"] Jan 06 09:45:58 crc kubenswrapper[4784]: E0106 09:45:58.171490 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" containerName="keystone-bootstrap" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.171508 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" containerName="keystone-bootstrap" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.171932 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" containerName="keystone-bootstrap" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.172615 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.177269 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.177350 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.177455 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.178575 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.180842 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4c25j" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.187417 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pcbzh"] Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.301005 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.301057 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.301307 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j9qf\" (UniqueName: \"kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc 
kubenswrapper[4784]: I0106 09:45:58.301598 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.301809 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.301929 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.330443 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7" path="/var/lib/kubelet/pods/07a1e510-8e4c-4ad2-a64c-c4f6d2167ce7/volumes" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403326 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403406 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403440 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403483 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403501 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.403539 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j9qf\" (UniqueName: \"kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf\") pod 
\"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.406683 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.406847 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.406934 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.415376 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.419600 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.420001 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.422296 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.425307 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.427243 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j9qf\" (UniqueName: \"kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf\") pod \"keystone-bootstrap-pcbzh\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.499530 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4c25j" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.507828 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:45:58 crc kubenswrapper[4784]: I0106 09:45:58.998726 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pcbzh"] Jan 06 09:45:59 crc kubenswrapper[4784]: W0106 09:45:59.001851 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6620201_92ef_41a1_bab1_d72b4bb416b3.slice/crio-5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f WatchSource:0}: Error finding container 5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f: Status 404 returned error can't find the container with id 5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f Jan 06 09:45:59 crc kubenswrapper[4784]: I0106 09:45:59.010667 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 06 09:45:59 crc kubenswrapper[4784]: I0106 09:45:59.998899 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pcbzh" event={"ID":"b6620201-92ef-41a1-bab1-d72b4bb416b3","Type":"ContainerStarted","Data":"bc3dc6c600a0892fc00a16865478bc0e3cf6985b46366b66395c37458344afca"} Jan 06 09:45:59 crc kubenswrapper[4784]: I0106 09:45:59.999278 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pcbzh" event={"ID":"b6620201-92ef-41a1-bab1-d72b4bb416b3","Type":"ContainerStarted","Data":"5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f"} Jan 06 09:46:00 crc kubenswrapper[4784]: I0106 09:46:00.021291 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-pcbzh" podStartSLOduration=2.021272627 podStartE2EDuration="2.021272627s" podCreationTimestamp="2026-01-06 09:45:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:46:00.016138568 +0000 UTC m=+5462.062311415" watchObservedRunningTime="2026-01-06 09:46:00.021272627 +0000 UTC m=+5462.067445474" Jan 06 09:46:01 crc kubenswrapper[4784]: I0106 09:46:01.514840 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75f555c9df-f87gg" Jan 06 09:46:01 crc kubenswrapper[4784]: I0106 09:46:01.576718 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:46:01 crc kubenswrapper[4784]: I0106 09:46:01.577108 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="dnsmasq-dns" containerID="cri-o://b895f2dcf6e239630b6df11af7123c08d27912522f617e6c26264507b14f168a" gracePeriod=10 Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.020062 4784 generic.go:334] "Generic (PLEG): container finished" podID="77986454-9ec3-49f9-819d-8fe115385022" containerID="b895f2dcf6e239630b6df11af7123c08d27912522f617e6c26264507b14f168a" exitCode=0 Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.020200 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" event={"ID":"77986454-9ec3-49f9-819d-8fe115385022","Type":"ContainerDied","Data":"b895f2dcf6e239630b6df11af7123c08d27912522f617e6c26264507b14f168a"} Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.027158 4784 generic.go:334] "Generic (PLEG): container finished" podID="b6620201-92ef-41a1-bab1-d72b4bb416b3" 
containerID="bc3dc6c600a0892fc00a16865478bc0e3cf6985b46366b66395c37458344afca" exitCode=0 Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.027301 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pcbzh" event={"ID":"b6620201-92ef-41a1-bab1-d72b4bb416b3","Type":"ContainerDied","Data":"bc3dc6c600a0892fc00a16865478bc0e3cf6985b46366b66395c37458344afca"} Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.094743 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.274782 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb\") pod \"77986454-9ec3-49f9-819d-8fe115385022\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.274913 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frk2s\" (UniqueName: \"kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s\") pod \"77986454-9ec3-49f9-819d-8fe115385022\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.274944 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc\") pod \"77986454-9ec3-49f9-819d-8fe115385022\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.275046 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config\") pod \"77986454-9ec3-49f9-819d-8fe115385022\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.275107 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb\") pod \"77986454-9ec3-49f9-819d-8fe115385022\" (UID: \"77986454-9ec3-49f9-819d-8fe115385022\") " Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.287887 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s" (OuterVolumeSpecName: "kube-api-access-frk2s") pod "77986454-9ec3-49f9-819d-8fe115385022" (UID: "77986454-9ec3-49f9-819d-8fe115385022"). InnerVolumeSpecName "kube-api-access-frk2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.312133 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "77986454-9ec3-49f9-819d-8fe115385022" (UID: "77986454-9ec3-49f9-819d-8fe115385022"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.330101 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config" (OuterVolumeSpecName: "config") pod "77986454-9ec3-49f9-819d-8fe115385022" (UID: "77986454-9ec3-49f9-819d-8fe115385022"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.338775 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "77986454-9ec3-49f9-819d-8fe115385022" (UID: "77986454-9ec3-49f9-819d-8fe115385022"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.349648 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "77986454-9ec3-49f9-819d-8fe115385022" (UID: "77986454-9ec3-49f9-819d-8fe115385022"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.377160 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.377188 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frk2s\" (UniqueName: \"kubernetes.io/projected/77986454-9ec3-49f9-819d-8fe115385022-kube-api-access-frk2s\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.377199 4784 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.377207 4784 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-config\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:02 crc kubenswrapper[4784]: I0106 09:46:02.377217 4784 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/77986454-9ec3-49f9-819d-8fe115385022-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.042737 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" event={"ID":"77986454-9ec3-49f9-819d-8fe115385022","Type":"ContainerDied","Data":"1f531c27c2a837f0d128e044047cf348816a574cba2165cbfdd55aeed8901343"} Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.042885 4784 scope.go:117] "RemoveContainer" containerID="b895f2dcf6e239630b6df11af7123c08d27912522f617e6c26264507b14f168a" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.043819 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6998c99fcf-jlxp5" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.071258 4784 scope.go:117] "RemoveContainer" containerID="e3d50610a4b7420b942bf5a326b16a103208128d14cf5441ff150c5a3725c425" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.114221 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.124956 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6998c99fcf-jlxp5"] Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.423129 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.603763 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.603837 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.603873 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.603971 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.604081 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.604170 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j9qf\" (UniqueName: \"kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf\") pod \"b6620201-92ef-41a1-bab1-d72b4bb416b3\" (UID: \"b6620201-92ef-41a1-bab1-d72b4bb416b3\") " Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.615230 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf" (OuterVolumeSpecName: "kube-api-access-6j9qf") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "kube-api-access-6j9qf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.615803 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.619803 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts" (OuterVolumeSpecName: "scripts") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.619744 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.663300 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data" (OuterVolumeSpecName: "config-data") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.672140 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6620201-92ef-41a1-bab1-d72b4bb416b3" (UID: "b6620201-92ef-41a1-bab1-d72b4bb416b3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706558 4784 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706600 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j9qf\" (UniqueName: \"kubernetes.io/projected/b6620201-92ef-41a1-bab1-d72b4bb416b3-kube-api-access-6j9qf\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706616 4784 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706629 4784 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-scripts\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706640 4784 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-config-data\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:03 crc kubenswrapper[4784]: I0106 09:46:03.706653 4784 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6620201-92ef-41a1-bab1-d72b4bb416b3-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.058864 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pcbzh" event={"ID":"b6620201-92ef-41a1-bab1-d72b4bb416b3","Type":"ContainerDied","Data":"5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f"} Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.058943 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cdd0fac5e10bb5134f625bf6e7bb5ee5b9f6affba179c592227e1297521e45f" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.059643 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pcbzh" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.173795 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5d7dd7c46d-9hl9r"] Jan 06 09:46:04 crc kubenswrapper[4784]: E0106 09:46:04.181699 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="dnsmasq-dns" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.181740 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="dnsmasq-dns" Jan 06 09:46:04 crc kubenswrapper[4784]: E0106 09:46:04.181795 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6620201-92ef-41a1-bab1-d72b4bb416b3" containerName="keystone-bootstrap" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.181811 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6620201-92ef-41a1-bab1-d72b4bb416b3" containerName="keystone-bootstrap" Jan 06 09:46:04 crc kubenswrapper[4784]: E0106 09:46:04.181846 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="init" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.181859 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="init" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.182132 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6620201-92ef-41a1-bab1-d72b4bb416b3" containerName="keystone-bootstrap" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.182189 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="77986454-9ec3-49f9-819d-8fe115385022" containerName="dnsmasq-dns" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.183089 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.189352 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.189785 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.189864 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.189884 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.190085 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-domains" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.190124 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.190249 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4c25j" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.206174 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5d7dd7c46d-9hl9r"] Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.325390 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77986454-9ec3-49f9-819d-8fe115385022" path="/var/lib/kubelet/pods/77986454-9ec3-49f9-819d-8fe115385022/volumes" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327258 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-internal-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327306 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-config-data\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327565 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-scripts\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327695 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"keystone-domains\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-keystone-domains\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327745 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-credential-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: 
\"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327772 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-public-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327929 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-fernet-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.327982 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7v7s\" (UniqueName: \"kubernetes.io/projected/b361ca5e-b2d1-4330-a8ca-3174c44bf574-kube-api-access-b7v7s\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.328036 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-combined-ca-bundle\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.429723 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-scripts\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.429823 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"keystone-domains\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-keystone-domains\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.429888 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-credential-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.429922 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-public-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.430009 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-fernet-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " 
pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.430061 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7v7s\" (UniqueName: \"kubernetes.io/projected/b361ca5e-b2d1-4330-a8ca-3174c44bf574-kube-api-access-b7v7s\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.430130 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-combined-ca-bundle\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.430254 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-internal-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.430304 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-config-data\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.435275 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-fernet-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.435292 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-public-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.436397 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-combined-ca-bundle\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.437023 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"keystone-domains\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-keystone-domains\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.437510 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-credential-keys\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r" Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.441830 4784 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-scripts\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.442675 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-config-data\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.445660 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b361ca5e-b2d1-4330-a8ca-3174c44bf574-internal-tls-certs\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.454160 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7v7s\" (UniqueName: \"kubernetes.io/projected/b361ca5e-b2d1-4330-a8ca-3174c44bf574-kube-api-access-b7v7s\") pod \"keystone-5d7dd7c46d-9hl9r\" (UID: \"b361ca5e-b2d1-4330-a8ca-3174c44bf574\") " pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:04 crc kubenswrapper[4784]: I0106 09:46:04.513394 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:05 crc kubenswrapper[4784]: I0106 09:46:05.072202 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5d7dd7c46d-9hl9r"]
Jan 06 09:46:06 crc kubenswrapper[4784]: I0106 09:46:06.081748 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5d7dd7c46d-9hl9r" event={"ID":"b361ca5e-b2d1-4330-a8ca-3174c44bf574","Type":"ContainerStarted","Data":"1a33e6f14ee2ba9be5c85fbea15450aa59eaedfc19113fe7607d287b2a1e270c"}
Jan 06 09:46:06 crc kubenswrapper[4784]: I0106 09:46:06.082210 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:06 crc kubenswrapper[4784]: I0106 09:46:06.082233 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5d7dd7c46d-9hl9r" event={"ID":"b361ca5e-b2d1-4330-a8ca-3174c44bf574","Type":"ContainerStarted","Data":"57dae4bf28ec5ec039a17fa3f8af452bd5f0081a13e4e4acbb1eda175c8b83e3"}
Jan 06 09:46:06 crc kubenswrapper[4784]: I0106 09:46:06.113065 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5d7dd7c46d-9hl9r" podStartSLOduration=2.113029824 podStartE2EDuration="2.113029824s" podCreationTimestamp="2026-01-06 09:46:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:46:06.108469492 +0000 UTC m=+5468.154642379" watchObservedRunningTime="2026-01-06 09:46:06.113029824 +0000 UTC m=+5468.159202701"
Jan 06 09:46:35 crc kubenswrapper[4784]: I0106 09:46:35.924363 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5d7dd7c46d-9hl9r"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.524716 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.526395 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.533847 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.534423 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.534693 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.534842 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-9z7vs"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.583112 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slq7n\" (UniqueName: \"kubernetes.io/projected/750e0544-586b-41c1-b843-709705e8d9e4-kube-api-access-slq7n\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.583162 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.583184 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.583301 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.684962 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.685097 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slq7n\" (UniqueName: \"kubernetes.io/projected/750e0544-586b-41c1-b843-709705e8d9e4-kube-api-access-slq7n\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.685130 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.685150 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.686145 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.692495 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.692514 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/750e0544-586b-41c1-b843-709705e8d9e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.714063 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slq7n\" (UniqueName: \"kubernetes.io/projected/750e0544-586b-41c1-b843-709705e8d9e4-kube-api-access-slq7n\") pod \"openstackclient\" (UID: \"750e0544-586b-41c1-b843-709705e8d9e4\") " pod="openstack/openstackclient"
Jan 06 09:46:40 crc kubenswrapper[4784]: I0106 09:46:40.853086 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 06 09:46:41 crc kubenswrapper[4784]: I0106 09:46:41.341439 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 06 09:46:41 crc kubenswrapper[4784]: I0106 09:46:41.497206 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"750e0544-586b-41c1-b843-709705e8d9e4","Type":"ContainerStarted","Data":"6938f701d106904bef1bf2180f275d45b587e37de669a6c79e891863fead1d70"}
Jan 06 09:46:42 crc kubenswrapper[4784]: I0106 09:46:42.510169 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"750e0544-586b-41c1-b843-709705e8d9e4","Type":"ContainerStarted","Data":"0efe15ca1310b2f7c1cc40f0712f18fafd413af3eef00a3703e13d9a30cc87f5"}
Jan 06 09:46:42 crc kubenswrapper[4784]: I0106 09:46:42.544118 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.544095071 podStartE2EDuration="2.544095071s" podCreationTimestamp="2026-01-06 09:46:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-06 09:46:42.530607382 +0000 UTC m=+5504.576780229" watchObservedRunningTime="2026-01-06 09:46:42.544095071 +0000 UTC m=+5504.590267918"
Jan 06 09:47:14 crc kubenswrapper[4784]: I0106 09:47:14.350739 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:47:14 crc kubenswrapper[4784]: I0106 09:47:14.351647 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.308887 4784 scope.go:117] "RemoveContainer" containerID="c1f53672bcd3fb880429d6d06e1cbe171a412c7697bcc7d97a3e52f6d6719216"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.348562 4784 scope.go:117] "RemoveContainer" containerID="0eabe4747a19425ea94ce8e680b71203c70c9756925aa63e565766cbf3a6eedc"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.393978 4784 scope.go:117] "RemoveContainer" containerID="90b4edc0a4d6664a79e304ca546da9883364ffe64ec55ab20db10ce9bdcf223d"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.465306 4784 scope.go:117] "RemoveContainer" containerID="f454c0cfeb169cb798895409809ae68f19010eacce8e1255d40ba6b2a27811e3"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.513193 4784 scope.go:117] "RemoveContainer" containerID="390a7ad5be1d855c43fdab3b3d7a26257213f7d5ec53ca258400c02ec03e2fad"
Jan 06 09:47:18 crc kubenswrapper[4784]: I0106 09:47:18.542587 4784 scope.go:117] "RemoveContainer" containerID="cabfa6043195df7fe4e3a974ab3311b5a520f1fc4a8e33a58a4a038185197c1d"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.440990 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.444480 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.455388 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.574219 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.574294 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gktd7\" (UniqueName: \"kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.574345 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.676495 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.676589 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gktd7\" (UniqueName: \"kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.676634 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.677050 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.677434 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.701358 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gktd7\" (UniqueName: \"kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7\") pod \"community-operators-bv8qc\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") " pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:35 crc kubenswrapper[4784]: I0106 09:47:35.788066 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:36 crc kubenswrapper[4784]: I0106 09:47:36.071032 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:36 crc kubenswrapper[4784]: I0106 09:47:36.193314 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerStarted","Data":"48415b76014f4a3293f82cbd017730f742272fabfc60b7df4bb91333ae8dd1f7"}
Jan 06 09:47:37 crc kubenswrapper[4784]: I0106 09:47:37.208615 4784 generic.go:334] "Generic (PLEG): container finished" podID="8038e011-c551-4261-bcbd-d372fb530695" containerID="db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b" exitCode=0
Jan 06 09:47:37 crc kubenswrapper[4784]: I0106 09:47:37.208676 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerDied","Data":"db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b"}
Jan 06 09:47:38 crc kubenswrapper[4784]: I0106 09:47:38.220987 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerStarted","Data":"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"}
Jan 06 09:47:39 crc kubenswrapper[4784]: I0106 09:47:39.239480 4784 generic.go:334] "Generic (PLEG): container finished" podID="8038e011-c551-4261-bcbd-d372fb530695" containerID="6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54" exitCode=0
Jan 06 09:47:39 crc kubenswrapper[4784]: I0106 09:47:39.239584 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerDied","Data":"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"}
Jan 06 09:47:40 crc kubenswrapper[4784]: I0106 09:47:40.264354 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerStarted","Data":"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"}
Jan 06 09:47:40 crc kubenswrapper[4784]: I0106 09:47:40.300459 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bv8qc" podStartSLOduration=2.671796262 podStartE2EDuration="5.300435169s" podCreationTimestamp="2026-01-06 09:47:35 +0000 UTC" firstStartedPulling="2026-01-06 09:47:37.211482995 +0000 UTC m=+5559.257655862" lastFinishedPulling="2026-01-06 09:47:39.840121922 +0000 UTC m=+5561.886294769" observedRunningTime="2026-01-06 09:47:40.294290548 +0000 UTC m=+5562.340463395" watchObservedRunningTime="2026-01-06 09:47:40.300435169 +0000 UTC m=+5562.346608036"
Jan 06 09:47:44 crc kubenswrapper[4784]: I0106 09:47:44.351457 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:47:44 crc kubenswrapper[4784]: I0106 09:47:44.352902 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:47:45 crc kubenswrapper[4784]: I0106 09:47:45.788753 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:45 crc kubenswrapper[4784]: I0106 09:47:45.788809 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:45 crc kubenswrapper[4784]: I0106 09:47:45.865806 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:46 crc kubenswrapper[4784]: I0106 09:47:46.399392 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:46 crc kubenswrapper[4784]: I0106 09:47:46.469133 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:48 crc kubenswrapper[4784]: I0106 09:47:48.338395 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bv8qc" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="registry-server" containerID="cri-o://9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d" gracePeriod=2
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.183265 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.348673 4784 generic.go:334] "Generic (PLEG): container finished" podID="8038e011-c551-4261-bcbd-d372fb530695" containerID="9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d" exitCode=0
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.348735 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerDied","Data":"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"}
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.348762 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bv8qc"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.348788 4784 scope.go:117] "RemoveContainer" containerID="9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.348771 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bv8qc" event={"ID":"8038e011-c551-4261-bcbd-d372fb530695","Type":"ContainerDied","Data":"48415b76014f4a3293f82cbd017730f742272fabfc60b7df4bb91333ae8dd1f7"}
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.349576 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gktd7\" (UniqueName: \"kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7\") pod \"8038e011-c551-4261-bcbd-d372fb530695\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") "
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.349764 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities\") pod \"8038e011-c551-4261-bcbd-d372fb530695\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") "
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.349837 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content\") pod \"8038e011-c551-4261-bcbd-d372fb530695\" (UID: \"8038e011-c551-4261-bcbd-d372fb530695\") "
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.351042 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities" (OuterVolumeSpecName: "utilities") pod "8038e011-c551-4261-bcbd-d372fb530695" (UID: "8038e011-c551-4261-bcbd-d372fb530695"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.364638 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7" (OuterVolumeSpecName: "kube-api-access-gktd7") pod "8038e011-c551-4261-bcbd-d372fb530695" (UID: "8038e011-c551-4261-bcbd-d372fb530695"). InnerVolumeSpecName "kube-api-access-gktd7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.397942 4784 scope.go:117] "RemoveContainer" containerID="6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.423322 4784 scope.go:117] "RemoveContainer" containerID="db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.438952 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8038e011-c551-4261-bcbd-d372fb530695" (UID: "8038e011-c551-4261-bcbd-d372fb530695"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.451820 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gktd7\" (UniqueName: \"kubernetes.io/projected/8038e011-c551-4261-bcbd-d372fb530695-kube-api-access-gktd7\") on node \"crc\" DevicePath \"\""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.451858 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.451872 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8038e011-c551-4261-bcbd-d372fb530695-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.469944 4784 scope.go:117] "RemoveContainer" containerID="9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"
Jan 06 09:47:49 crc kubenswrapper[4784]: E0106 09:47:49.470376 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d\": container with ID starting with 9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d not found: ID does not exist" containerID="9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.470411 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d"} err="failed to get container status \"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d\": rpc error: code = NotFound desc = could not find container \"9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d\": container with ID starting with 9020cfdb4de626df9879d7966953e62e1b687fb5ee99276fd0ac3c4964e0da6d not found: ID does not exist"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.470435 4784 scope.go:117] "RemoveContainer" containerID="6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"
Jan 06 09:47:49 crc kubenswrapper[4784]: E0106 09:47:49.471211 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54\": container with ID starting with 6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54 not found: ID does not exist" containerID="6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.471243 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54"} err="failed to get container status \"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54\": rpc error: code = NotFound desc = could not find container \"6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54\": container with ID starting with 6bc3bd7061fefaff07a992f453a064e06357318ec4c9241fb86314c25c541c54 not found: ID does not exist"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.471262 4784 scope.go:117] "RemoveContainer" containerID="db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b"
Jan 06 09:47:49 crc kubenswrapper[4784]: E0106 09:47:49.471734 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b\": container with ID starting with db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b not found: ID does not exist" containerID="db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.471758 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b"} err="failed to get container status \"db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b\": rpc error: code = NotFound desc = could not find container \"db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b\": container with ID starting with db1962be4b1cc17a84905c933b31dde05f8a6a2e989ac47ae4062e8d8135c50b not found: ID does not exist"
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.707754 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:49 crc kubenswrapper[4784]: I0106 09:47:49.719493 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bv8qc"]
Jan 06 09:47:50 crc kubenswrapper[4784]: I0106 09:47:50.325631 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8038e011-c551-4261-bcbd-d372fb530695" path="/var/lib/kubelet/pods/8038e011-c551-4261-bcbd-d372fb530695/volumes"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.120151 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:08 crc kubenswrapper[4784]: E0106 09:48:08.121648 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="extract-utilities"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.121675 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="extract-utilities"
Jan 06 09:48:08 crc kubenswrapper[4784]: E0106 09:48:08.121713 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="extract-content"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.121728 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="extract-content"
Jan 06 09:48:08 crc kubenswrapper[4784]: E0106 09:48:08.121746 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="registry-server"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.121764 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="registry-server"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.122081 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="8038e011-c551-4261-bcbd-d372fb530695" containerName="registry-server"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.124351 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.130601 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.245726 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.245787 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk9wc\" (UniqueName: \"kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.245929 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.372220 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk9wc\" (UniqueName: \"kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.372520 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.372997 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.373331 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.373898 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.392969 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk9wc\" (UniqueName: \"kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc\") pod \"certified-operators-n2z2w\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") " pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:08 crc kubenswrapper[4784]: I0106 09:48:08.461087 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:09 crc kubenswrapper[4784]: I0106 09:48:09.026614 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:09 crc kubenswrapper[4784]: I0106 09:48:09.640430 4784 generic.go:334] "Generic (PLEG): container finished" podID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerID="478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc" exitCode=0
Jan 06 09:48:09 crc kubenswrapper[4784]: I0106 09:48:09.640512 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerDied","Data":"478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc"}
Jan 06 09:48:09 crc kubenswrapper[4784]: I0106 09:48:09.640596 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerStarted","Data":"6d26477b207265b5dd6a15f69db55bef63b4bee802a4f55acd5c91054feb9652"}
Jan 06 09:48:10 crc kubenswrapper[4784]: I0106 09:48:10.659018 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerStarted","Data":"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"}
Jan 06 09:48:11 crc kubenswrapper[4784]: I0106 09:48:11.673085 4784 generic.go:334] "Generic (PLEG): container finished" podID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerID="9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762" exitCode=0
Jan 06 09:48:11 crc kubenswrapper[4784]: I0106 09:48:11.673508 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerDied","Data":"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"}
Jan 06 09:48:12 crc kubenswrapper[4784]: I0106 09:48:12.687242 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerStarted","Data":"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"}
Jan 06 09:48:12 crc kubenswrapper[4784]: I0106 09:48:12.721626 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n2z2w" podStartSLOduration=2.273731794 podStartE2EDuration="4.721597934s" podCreationTimestamp="2026-01-06 09:48:08 +0000 UTC" firstStartedPulling="2026-01-06 09:48:09.642840596 +0000 UTC m=+5591.689013453" lastFinishedPulling="2026-01-06 09:48:12.090706716 +0000 UTC m=+5594.136879593" observedRunningTime="2026-01-06 09:48:12.714492934 +0000 UTC m=+5594.760665821" watchObservedRunningTime="2026-01-06 09:48:12.721597934 +0000 UTC m=+5594.767770811"
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.350844 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.351328 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.351389 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth"
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.352611 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.352743 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e" gracePeriod=600
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.717364 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e" exitCode=0
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.717455 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e"}
Jan 06 09:48:14 crc kubenswrapper[4784]: I0106 09:48:14.718000 4784 scope.go:117] "RemoveContainer" containerID="d6835f9ebfc6343c4e52d255ffaebc6d29f9f63e21413bd05d6e8eeafe27628f"
Jan 06 09:48:15 crc kubenswrapper[4784]: I0106 09:48:15.730387 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8"}
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.669006 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.671982 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.694164 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.759696 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5zw8\" (UniqueName: \"kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.759744 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.760315 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.862363 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.862435 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5zw8\" (UniqueName: \"kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.862467 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.862978 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.863048 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:17 crc kubenswrapper[4784]: I0106 09:48:17.894935 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5zw8\" (UniqueName: \"kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8\") pod \"redhat-operators-fps7m\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") " pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.007249 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.461611 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.461679 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.470864 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.532724 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.759828 4784 generic.go:334] "Generic (PLEG): container finished" podID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerID="f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2" exitCode=0
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.759934 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerDied","Data":"f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2"}
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.760070 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerStarted","Data":"d492895a260d3b94e0f4728d97ca8860bc96ea6678f6b127dda190b352a412cf"}
Jan 06 09:48:18 crc kubenswrapper[4784]: I0106 09:48:18.825643 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:19 crc kubenswrapper[4784]: I0106 09:48:19.774934 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerStarted","Data":"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"}
Jan 06 09:48:20 crc kubenswrapper[4784]: I0106 09:48:20.796657 4784 generic.go:334] "Generic (PLEG): container finished" podID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerID="4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae" exitCode=0
Jan 06 09:48:20 crc kubenswrapper[4784]: I0106 09:48:20.796747 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerDied","Data":"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"}
Jan 06 09:48:20 crc kubenswrapper[4784]: I0106 09:48:20.811663 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:20 crc kubenswrapper[4784]: I0106 09:48:20.811960 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n2z2w" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="registry-server" containerID="cri-o://f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50" gracePeriod=2
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.332164 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.431727 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities\") pod \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") "
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.431852 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jk9wc\" (UniqueName: \"kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc\") pod \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") "
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.431935 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content\") pod \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\" (UID: \"6b407da0-08b7-4aca-a9a1-ecca065f8a90\") "
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.432854 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities" (OuterVolumeSpecName: "utilities") pod "6b407da0-08b7-4aca-a9a1-ecca065f8a90" (UID: "6b407da0-08b7-4aca-a9a1-ecca065f8a90"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.438161 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc" (OuterVolumeSpecName: "kube-api-access-jk9wc") pod "6b407da0-08b7-4aca-a9a1-ecca065f8a90" (UID: "6b407da0-08b7-4aca-a9a1-ecca065f8a90"). InnerVolumeSpecName "kube-api-access-jk9wc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.533613 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jk9wc\" (UniqueName: \"kubernetes.io/projected/6b407da0-08b7-4aca-a9a1-ecca065f8a90-kube-api-access-jk9wc\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.533951 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.786650 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b407da0-08b7-4aca-a9a1-ecca065f8a90" (UID: "6b407da0-08b7-4aca-a9a1-ecca065f8a90"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.810858 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerStarted","Data":"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"}
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.816340 4784 generic.go:334] "Generic (PLEG): container finished" podID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerID="f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50" exitCode=0
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.816412 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerDied","Data":"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"}
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.816467 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n2z2w" event={"ID":"6b407da0-08b7-4aca-a9a1-ecca065f8a90","Type":"ContainerDied","Data":"6d26477b207265b5dd6a15f69db55bef63b4bee802a4f55acd5c91054feb9652"}
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.816486 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n2z2w"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.816498 4784 scope.go:117] "RemoveContainer" containerID="f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.839219 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b407da0-08b7-4aca-a9a1-ecca065f8a90-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.842399 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fps7m" podStartSLOduration=2.20054723 podStartE2EDuration="4.842384646s" podCreationTimestamp="2026-01-06 09:48:17 +0000 UTC" firstStartedPulling="2026-01-06 09:48:18.761190433 +0000 UTC m=+5600.807363270" lastFinishedPulling="2026-01-06 09:48:21.403027839 +0000 UTC m=+5603.449200686" observedRunningTime="2026-01-06 09:48:21.835520794 +0000 UTC m=+5603.881693661" watchObservedRunningTime="2026-01-06 09:48:21.842384646 +0000 UTC m=+5603.888557493"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.858229 4784 scope.go:117] "RemoveContainer" containerID="9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.879458 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.888884 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n2z2w"]
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.895812 4784 scope.go:117] "RemoveContainer" containerID="478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.916289 4784 scope.go:117] "RemoveContainer" containerID="f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"
Jan 06 09:48:21 crc kubenswrapper[4784]: E0106 09:48:21.916834 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50\": container with ID starting with f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50 not found: ID does not exist" containerID="f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.916898 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50"} err="failed to get container status \"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50\": rpc error: code = NotFound desc = could not find container \"f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50\": container with ID starting with f40628fbaa0f4e82e5ba0a2f0d595d6a47fdcba4a0c5adbeb3c5f4dc1d699b50 not found: ID does not exist"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.916942 4784 scope.go:117] "RemoveContainer" containerID="9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"
Jan 06 09:48:21 crc kubenswrapper[4784]: E0106 09:48:21.917367 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762\": container with ID starting with 9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762 not found: ID does not exist" containerID="9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.917441 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762"} err="failed to get container status \"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762\": rpc error: code = NotFound desc = could not find container \"9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762\": container with ID starting with 9059ea68abb9aa32e6c562e535395a491fb3e0bc501f3c41d06d6c8bd624d762 not found: ID does not exist"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.917507 4784 scope.go:117] "RemoveContainer" containerID="478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc"
Jan 06 09:48:21 crc kubenswrapper[4784]: E0106 09:48:21.918007 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc\": container with ID starting with 478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc not found: ID does not exist" containerID="478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc"
Jan 06 09:48:21 crc kubenswrapper[4784]: I0106 09:48:21.918074 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc"} err="failed to get container status \"478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc\": rpc error: code = NotFound desc = could not find container \"478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc\": container with ID starting with 478d31958d4eb410cbf4bd46153fb818c1b99b04205a7e874b11ea7a2c12f2bc not found: ID does not exist"
Jan 06 09:48:22 crc kubenswrapper[4784]: I0106 09:48:22.324985 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" path="/var/lib/kubelet/pods/6b407da0-08b7-4aca-a9a1-ecca065f8a90/volumes"
Jan 06 09:48:28 crc kubenswrapper[4784]: I0106 09:48:28.007380 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:28 crc kubenswrapper[4784]: I0106 09:48:28.008051 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:29 crc kubenswrapper[4784]: I0106 09:48:29.074059 4784 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fps7m" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="registry-server" probeResult="failure" output=<
Jan 06 09:48:29 crc kubenswrapper[4784]: timeout: failed to connect service ":50051" within 1s
Jan 06 09:48:29 crc kubenswrapper[4784]: >
Jan 06 09:48:38 crc kubenswrapper[4784]: I0106 09:48:38.086900 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:38 crc kubenswrapper[4784]: I0106 09:48:38.174344 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:39 crc kubenswrapper[4784]: I0106 09:48:39.316397 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.029027 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fps7m" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="registry-server" containerID="cri-o://2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0" gracePeriod=2
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.588962 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.623485 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5zw8\" (UniqueName: \"kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8\") pod \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") "
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.623609 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities\") pod \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") "
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.623652 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content\") pod \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\" (UID: \"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48\") "
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.625526 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities" (OuterVolumeSpecName: "utilities") pod "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" (UID: "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.633691 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8" (OuterVolumeSpecName: "kube-api-access-p5zw8") pod "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" (UID: "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48"). InnerVolumeSpecName "kube-api-access-p5zw8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.725772 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5zw8\" (UniqueName: \"kubernetes.io/projected/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-kube-api-access-p5zw8\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.725824 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-utilities\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.786783 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" (UID: "9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 06 09:48:40 crc kubenswrapper[4784]: I0106 09:48:40.827611 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.044315 4784 generic.go:334] "Generic (PLEG): container finished" podID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerID="2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0" exitCode=0
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.044376 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerDied","Data":"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"}
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.044431 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fps7m" event={"ID":"9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48","Type":"ContainerDied","Data":"d492895a260d3b94e0f4728d97ca8860bc96ea6678f6b127dda190b352a412cf"}
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.044474 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fps7m"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.044482 4784 scope.go:117] "RemoveContainer" containerID="2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.089755 4784 scope.go:117] "RemoveContainer" containerID="4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.103025 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.119499 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fps7m"]
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.125401 4784 scope.go:117] "RemoveContainer" containerID="f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.177505 4784 scope.go:117] "RemoveContainer" containerID="2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"
Jan 06 09:48:41 crc kubenswrapper[4784]: E0106 09:48:41.178409 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0\": container with ID starting with 2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0 not found: ID does not exist" containerID="2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.178467 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0"} err="failed to get container status \"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0\": rpc error: code = NotFound desc = could not find container \"2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0\": container with ID starting with 2b35127f2e5a24ef06deeb044686ef243a87cca63bf92dbe78f665041cfb4eb0 not found: ID does not exist"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.178506 4784 scope.go:117] "RemoveContainer" containerID="4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"
Jan 06 09:48:41 crc kubenswrapper[4784]: E0106 09:48:41.179029 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae\": container with ID starting with 4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae not found: ID does not exist" containerID="4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.179059 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae"} err="failed to get container status \"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae\": rpc error: code = NotFound desc = could not find container \"4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae\": container with ID starting with 4743dffbf7ca5ca6f6806215e7ac03409296ca4764fa0eacba7f51f838d74bae not found: ID does not exist"
Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.179078 4784 scope.go:117] "RemoveContainer"
containerID="f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2" Jan 06 09:48:41 crc kubenswrapper[4784]: E0106 09:48:41.180376 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2\": container with ID starting with f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2 not found: ID does not exist" containerID="f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2" Jan 06 09:48:41 crc kubenswrapper[4784]: I0106 09:48:41.180662 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2"} err="failed to get container status \"f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2\": rpc error: code = NotFound desc = could not find container \"f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2\": container with ID starting with f2abf14553a1400c5c5b6427f11806c43966522b038585e4e7786905d4637ba2 not found: ID does not exist" Jan 06 09:48:42 crc kubenswrapper[4784]: I0106 09:48:42.330104 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" path="/var/lib/kubelet/pods/9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48/volumes" Jan 06 09:48:43 crc kubenswrapper[4784]: I0106 09:48:43.108945 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-cr2vz"] Jan 06 09:48:43 crc kubenswrapper[4784]: I0106 09:48:43.116049 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-cr2vz"] Jan 06 09:48:44 crc kubenswrapper[4784]: I0106 09:48:44.332115 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31150519-a40e-4c55-8b6d-a28ca67c6ff8" path="/var/lib/kubelet/pods/31150519-a40e-4c55-8b6d-a28ca67c6ff8/volumes" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600093 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xfxs6/must-gather-fk7md"] Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600840 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="extract-content" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600852 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="extract-content" Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600866 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="extract-utilities" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600872 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="extract-utilities" Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600893 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600898 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600907 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="extract-utilities" Jan 06 
09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600913 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="extract-utilities" Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600923 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600929 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: E0106 09:49:16.600943 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="extract-content" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.600948 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="extract-content" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.601076 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b407da0-08b7-4aca-a9a1-ecca065f8a90" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.601094 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ac9c75c-ea54-4d2b-aecd-dfd608ec4c48" containerName="registry-server" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.601887 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.603724 4784 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-xfxs6"/"default-dockercfg-2jgkn" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.603783 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xfxs6"/"kube-root-ca.crt" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.604384 4784 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xfxs6"/"openshift-service-ca.crt" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.616799 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xfxs6/must-gather-fk7md"] Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.763941 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6hwp\" (UniqueName: \"kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.764048 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.865446 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc 
kubenswrapper[4784]: I0106 09:49:16.865813 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.865955 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6hwp\" (UniqueName: \"kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.917121 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6hwp\" (UniqueName: \"kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp\") pod \"must-gather-fk7md\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:16 crc kubenswrapper[4784]: I0106 09:49:16.917761 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:49:17 crc kubenswrapper[4784]: I0106 09:49:17.217313 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xfxs6/must-gather-fk7md"] Jan 06 09:49:17 crc kubenswrapper[4784]: W0106 09:49:17.228798 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9895028_22be_4f7e_b2a4_df8f3ff3fd4d.slice/crio-035c380bcbb1e6dfcac30d48ee5d926dde5263629b7a2db8f437fc1dcb5dc59a WatchSource:0}: Error finding container 035c380bcbb1e6dfcac30d48ee5d926dde5263629b7a2db8f437fc1dcb5dc59a: Status 404 returned error can't find the container with id 035c380bcbb1e6dfcac30d48ee5d926dde5263629b7a2db8f437fc1dcb5dc59a Jan 06 09:49:17 crc kubenswrapper[4784]: I0106 09:49:17.444437 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/must-gather-fk7md" event={"ID":"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d","Type":"ContainerStarted","Data":"035c380bcbb1e6dfcac30d48ee5d926dde5263629b7a2db8f437fc1dcb5dc59a"} Jan 06 09:49:18 crc kubenswrapper[4784]: I0106 09:49:18.720863 4784 scope.go:117] "RemoveContainer" containerID="351cb6bf5de0e2f11e590ff4da3f121ca080e6b057765fd36e151b7da752691a" Jan 06 09:49:25 crc kubenswrapper[4784]: I0106 09:49:25.524851 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/must-gather-fk7md" event={"ID":"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d","Type":"ContainerStarted","Data":"018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57"} Jan 06 09:49:25 crc kubenswrapper[4784]: I0106 09:49:25.525269 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/must-gather-fk7md" event={"ID":"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d","Type":"ContainerStarted","Data":"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292"} Jan 06 09:49:25 crc kubenswrapper[4784]: I0106 09:49:25.545381 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xfxs6/must-gather-fk7md" podStartSLOduration=2.096561231 podStartE2EDuration="9.545354266s" podCreationTimestamp="2026-01-06 09:49:16 +0000 UTC" firstStartedPulling="2026-01-06 
09:49:17.230699626 +0000 UTC m=+5659.276872463" lastFinishedPulling="2026-01-06 09:49:24.679492661 +0000 UTC m=+5666.725665498" observedRunningTime="2026-01-06 09:49:25.543433495 +0000 UTC m=+5667.589606332" watchObservedRunningTime="2026-01-06 09:49:25.545354266 +0000 UTC m=+5667.591527143" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.667623 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-4q99l"] Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.669241 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.688184 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.688229 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n45r9\" (UniqueName: \"kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.790094 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.790179 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n45r9\" (UniqueName: \"kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.790994 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.865372 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n45r9\" (UniqueName: \"kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9\") pod \"crc-debug-4q99l\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:27 crc kubenswrapper[4784]: I0106 09:49:27.996870 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:28 crc kubenswrapper[4784]: I0106 09:49:28.550423 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" event={"ID":"720f424c-9010-4e36-b189-9dceadde58f7","Type":"ContainerStarted","Data":"19aadcc23247b986419f46ce61f1cc2266f1d2f78b94db0d834de3a76d0827d5"} Jan 06 09:49:39 crc kubenswrapper[4784]: I0106 09:49:39.662877 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" event={"ID":"720f424c-9010-4e36-b189-9dceadde58f7","Type":"ContainerStarted","Data":"2b1082795b11fded9cfcdea78de92e9ee1106ca386429c47d696842252125f8e"} Jan 06 09:49:39 crc kubenswrapper[4784]: I0106 09:49:39.690011 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" podStartSLOduration=2.124375349 podStartE2EDuration="12.689987032s" podCreationTimestamp="2026-01-06 09:49:27 +0000 UTC" firstStartedPulling="2026-01-06 09:49:28.033290059 +0000 UTC m=+5670.079462896" lastFinishedPulling="2026-01-06 09:49:38.598901702 +0000 UTC m=+5680.645074579" observedRunningTime="2026-01-06 09:49:39.67991641 +0000 UTC m=+5681.726089257" watchObservedRunningTime="2026-01-06 09:49:39.689987032 +0000 UTC m=+5681.736159899" Jan 06 09:49:57 crc kubenswrapper[4784]: I0106 09:49:57.821972 4784 generic.go:334] "Generic (PLEG): container finished" podID="720f424c-9010-4e36-b189-9dceadde58f7" containerID="2b1082795b11fded9cfcdea78de92e9ee1106ca386429c47d696842252125f8e" exitCode=0 Jan 06 09:49:57 crc kubenswrapper[4784]: I0106 09:49:57.822047 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" event={"ID":"720f424c-9010-4e36-b189-9dceadde58f7","Type":"ContainerDied","Data":"2b1082795b11fded9cfcdea78de92e9ee1106ca386429c47d696842252125f8e"} Jan 06 09:49:58 crc kubenswrapper[4784]: I0106 09:49:58.955260 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:49:58 crc kubenswrapper[4784]: I0106 09:49:58.999407 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-4q99l"] Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:58.999990 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n45r9\" (UniqueName: \"kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9\") pod \"720f424c-9010-4e36-b189-9dceadde58f7\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.000095 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host\") pod \"720f424c-9010-4e36-b189-9dceadde58f7\" (UID: \"720f424c-9010-4e36-b189-9dceadde58f7\") " Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.000203 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host" (OuterVolumeSpecName: "host") pod "720f424c-9010-4e36-b189-9dceadde58f7" (UID: "720f424c-9010-4e36-b189-9dceadde58f7"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.000691 4784 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/720f424c-9010-4e36-b189-9dceadde58f7-host\") on node \"crc\" DevicePath \"\"" Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.009244 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9" (OuterVolumeSpecName: "kube-api-access-n45r9") pod "720f424c-9010-4e36-b189-9dceadde58f7" (UID: "720f424c-9010-4e36-b189-9dceadde58f7"). InnerVolumeSpecName "kube-api-access-n45r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.010901 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-4q99l"] Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.114393 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n45r9\" (UniqueName: \"kubernetes.io/projected/720f424c-9010-4e36-b189-9dceadde58f7-kube-api-access-n45r9\") on node \"crc\" DevicePath \"\"" Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.846373 4784 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19aadcc23247b986419f46ce61f1cc2266f1d2f78b94db0d834de3a76d0827d5" Jan 06 09:49:59 crc kubenswrapper[4784]: I0106 09:49:59.846527 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-4q99l" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.232499 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-jjhqm"] Jan 06 09:50:00 crc kubenswrapper[4784]: E0106 09:50:00.232975 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="720f424c-9010-4e36-b189-9dceadde58f7" containerName="container-00" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.232996 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="720f424c-9010-4e36-b189-9dceadde58f7" containerName="container-00" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.233233 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="720f424c-9010-4e36-b189-9dceadde58f7" containerName="container-00" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.233931 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.320803 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="720f424c-9010-4e36-b189-9dceadde58f7" path="/var/lib/kubelet/pods/720f424c-9010-4e36-b189-9dceadde58f7/volumes" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.338085 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.338165 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4grs\" (UniqueName: \"kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.440027 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.440121 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.440811 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4grs\" (UniqueName: \"kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.461696 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4grs\" (UniqueName: \"kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs\") pod \"crc-debug-jjhqm\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.550070 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:00 crc kubenswrapper[4784]: W0106 09:50:00.571613 4784 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41288985_0381_4f8a_b6e3_304b8f66cdf5.slice/crio-127e8a94c5a8f8fcfe5e26db8b1f54eb91a224cabbf2a2ff2f55de041d3a07d8 WatchSource:0}: Error finding container 127e8a94c5a8f8fcfe5e26db8b1f54eb91a224cabbf2a2ff2f55de041d3a07d8: Status 404 returned error can't find the container with id 127e8a94c5a8f8fcfe5e26db8b1f54eb91a224cabbf2a2ff2f55de041d3a07d8 Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.855615 4784 generic.go:334] "Generic (PLEG): container finished" podID="41288985-0381-4f8a-b6e3-304b8f66cdf5" containerID="09b599511b1099a5f64359e0790c540ece04dcd042006086ccbf1145b8850f9b" exitCode=1 Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.855655 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" event={"ID":"41288985-0381-4f8a-b6e3-304b8f66cdf5","Type":"ContainerDied","Data":"09b599511b1099a5f64359e0790c540ece04dcd042006086ccbf1145b8850f9b"} Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.855682 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" event={"ID":"41288985-0381-4f8a-b6e3-304b8f66cdf5","Type":"ContainerStarted","Data":"127e8a94c5a8f8fcfe5e26db8b1f54eb91a224cabbf2a2ff2f55de041d3a07d8"} Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.887206 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-jjhqm"] Jan 06 09:50:00 crc kubenswrapper[4784]: I0106 09:50:00.896693 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xfxs6/crc-debug-jjhqm"] Jan 06 09:50:01 crc kubenswrapper[4784]: I0106 09:50:01.970388 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.070155 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host\") pod \"41288985-0381-4f8a-b6e3-304b8f66cdf5\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.070275 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4grs\" (UniqueName: \"kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs\") pod \"41288985-0381-4f8a-b6e3-304b8f66cdf5\" (UID: \"41288985-0381-4f8a-b6e3-304b8f66cdf5\") " Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.070312 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host" (OuterVolumeSpecName: "host") pod "41288985-0381-4f8a-b6e3-304b8f66cdf5" (UID: "41288985-0381-4f8a-b6e3-304b8f66cdf5"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.070845 4784 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/41288985-0381-4f8a-b6e3-304b8f66cdf5-host\") on node \"crc\" DevicePath \"\"" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.077706 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs" (OuterVolumeSpecName: "kube-api-access-c4grs") pod "41288985-0381-4f8a-b6e3-304b8f66cdf5" (UID: "41288985-0381-4f8a-b6e3-304b8f66cdf5"). InnerVolumeSpecName "kube-api-access-c4grs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.171935 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4grs\" (UniqueName: \"kubernetes.io/projected/41288985-0381-4f8a-b6e3-304b8f66cdf5-kube-api-access-c4grs\") on node \"crc\" DevicePath \"\"" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.324970 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41288985-0381-4f8a-b6e3-304b8f66cdf5" path="/var/lib/kubelet/pods/41288985-0381-4f8a-b6e3-304b8f66cdf5/volumes" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.882804 4784 scope.go:117] "RemoveContainer" containerID="09b599511b1099a5f64359e0790c540ece04dcd042006086ccbf1145b8850f9b" Jan 06 09:50:02 crc kubenswrapper[4784]: I0106 09:50:02.883145 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/crc-debug-jjhqm" Jan 06 09:50:15 crc kubenswrapper[4784]: I0106 09:50:15.923296 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-75f555c9df-f87gg_a66f9730-85f2-42cc-8c48-765e2b138afe/init/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.083063 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-75f555c9df-f87gg_a66f9730-85f2-42cc-8c48-765e2b138afe/init/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.139372 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-75f555c9df-f87gg_a66f9730-85f2-42cc-8c48-765e2b138afe/dnsmasq-dns/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.262421 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5d7dd7c46d-9hl9r_b361ca5e-b2d1-4330-a8ca-3174c44bf574/keystone-api/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.478247 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-b9e3-account-create-update-fc6gj_aaf0679d-d8f7-4e6b-bef2-23b9337d691d/mariadb-account-create-update/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.618398 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-bootstrap-pcbzh_b6620201-92ef-41a1-bab1-d72b4bb416b3/keystone-bootstrap/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.689122 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-db-create-9hlnc_f1a0eaf0-8041-4652-9c05-ab5208875034/mariadb-database-create/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.793965 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-db-sync-p8sgr_bcb83c22-8916-4c1c-976f-51c157f9b7db/keystone-db-sync/0.log" Jan 06 09:50:16 crc kubenswrapper[4784]: I0106 09:50:16.862845 4784 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_1305a97b-56fd-4686-bfe9-ee6901343241/adoption/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.093666 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0c62e4c3-2b3e-49bf-940c-0bfb4b23f066/mysql-bootstrap/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.298605 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0c62e4c3-2b3e-49bf-940c-0bfb4b23f066/galera/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.317423 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0c62e4c3-2b3e-49bf-940c-0bfb4b23f066/mysql-bootstrap/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.516985 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_693aaeed-5a0c-4230-a3e1-4b7b74a519cd/mysql-bootstrap/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.695696 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_693aaeed-5a0c-4230-a3e1-4b7b74a519cd/mysql-bootstrap/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.781252 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_693aaeed-5a0c-4230-a3e1-4b7b74a519cd/galera/0.log" Jan 06 09:50:17 crc kubenswrapper[4784]: I0106 09:50:17.946741 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_750e0544-586b-41c1-b843-709705e8d9e4/openstackclient/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.083353 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_14658b5d-ac33-4c8e-a5a2-3166b7e1a5b8/adoption/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.131822 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c7230cd6-8400-4e6f-b4fd-a086e38629b3/memcached/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.217511 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0c462ba2-3550-4c14-a898-927acee7d1bc/openstack-network-exporter/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.300830 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0c462ba2-3550-4c14-a898-927acee7d1bc/ovn-northd/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.367634 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87d0ad06-efd9-4785-9dcd-6a215df2068e/openstack-network-exporter/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.401502 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87d0ad06-efd9-4785-9dcd-6a215df2068e/ovsdbserver-nb/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.540267 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_0428c32c-f26c-4baa-8914-1079f1097a09/openstack-network-exporter/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.589021 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_0428c32c-f26c-4baa-8914-1079f1097a09/ovsdbserver-nb/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.731227 4784 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-2_52b356d6-b34e-4eac-9f70-5138fe492d64/openstack-network-exporter/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.765896 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_52b356d6-b34e-4eac-9f70-5138fe492d64/ovsdbserver-nb/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.830091 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_672bf4f8-dc22-458b-a552-e60871c86e17/openstack-network-exporter/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.902330 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_672bf4f8-dc22-458b-a552-e60871c86e17/ovsdbserver-sb/0.log" Jan 06 09:50:18 crc kubenswrapper[4784]: I0106 09:50:18.998386 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_6e1b3eaf-d1a3-416d-97f8-3a889e94f595/openstack-network-exporter/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.144069 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_6e1b3eaf-d1a3-416d-97f8-3a889e94f595/ovsdbserver-sb/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.229733 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_59ff486b-0d07-48c7-bb88-0d13a30eaade/openstack-network-exporter/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.286571 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_59ff486b-0d07-48c7-bb88-0d13a30eaade/ovsdbserver-sb/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.417662 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3f448393-522c-4929-be8a-77f402e1d402/setup-container/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.575374 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3f448393-522c-4929-be8a-77f402e1d402/setup-container/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.576161 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_3f448393-522c-4929-be8a-77f402e1d402/rabbitmq/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.642781 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d73722d6-2310-44ed-a5a7-4dc1c0c7df16/setup-container/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.791343 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d73722d6-2310-44ed-a5a7-4dc1c0c7df16/setup-container/0.log" Jan 06 09:50:19 crc kubenswrapper[4784]: I0106 09:50:19.835473 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d73722d6-2310-44ed-a5a7-4dc1c0c7df16/rabbitmq/0.log" Jan 06 09:50:37 crc kubenswrapper[4784]: I0106 09:50:37.607859 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/util/0.log" Jan 06 09:50:37 crc kubenswrapper[4784]: I0106 09:50:37.788473 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/util/0.log" Jan 06 09:50:37 crc kubenswrapper[4784]: I0106 09:50:37.797920 4784 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/pull/0.log" Jan 06 09:50:37 crc kubenswrapper[4784]: I0106 09:50:37.818436 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/pull/0.log" Jan 06 09:50:37 crc kubenswrapper[4784]: I0106 09:50:37.994105 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/extract/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.002644 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/util/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.008661 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_78dfb6ba50c096c51951a25c0a0a46ad3d3339eb48c8929c19d191eba8k85kl_1551dd5f-2b41-400e-a9ce-25df87d7935b/pull/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.271562 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-f6f74d6db-zvg9p_1a45b65a-e45a-4a0e-be79-81b1d9d60c35/manager/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.308568 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-78979fc445-9c6jw_07c9eb1a-7ea5-4982-a6e9-fffba06a9dd7/manager/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.419455 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66f8b87655-k79sj_9b052e6e-0823-402f-8e8b-bbd48d0ea36d/manager/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.568036 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7b549fc966-7krlv_bd426be0-7b17-4514-b36a-8e25c067a2e9/manager/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.604686 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-658dd65b86-nkztz_fc26f7fb-7146-41b2-b288-96c827f08dd4/manager/0.log" Jan 06 09:50:38 crc kubenswrapper[4784]: I0106 09:50:38.726332 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7f5ddd8d7b-lw8c8_6da1078f-40ff-46ae-af83-b4befd08da78/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.058746 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-f99f54bc8-ntw8z_74544171-df6c-4a36-8955-3b11da058598/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.072027 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6d99759cf-tqhzj_edc01c87-757b-47c7-b3cb-bfbb7ec71797/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.272342 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-568985c78-q4rwb_4a449bab-eabb-457b-94a3-c2a5bfd9827c/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.292982 4784 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-598945d5b8-s6sv6_1c68028b-8461-491c-bf85-78776c51b77d/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.456125 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-7b88bfc995-chgf2_d74a08a3-5b46-47c3-8c74-d29e532e4df3/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.539180 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7cd87b778f-5t7n9_754f68df-1972-403c-927d-898d74e9191a/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.717715 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5fbbf8b6cc-pz42s_fba34147-9dd8-4e21-a886-9d1de06ef7ad/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.779309 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-68c649d9d-9gkmh_adad8ba1-23e8-40d7-95b8-60cc2803765c/manager/0.log" Jan 06 09:50:39 crc kubenswrapper[4784]: I0106 09:50:39.978980 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-74b998cd6-96jtt_5303160b-4666-4800-8f86-72b1a823073d/manager/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.376031 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7746f6c5b8-f94bg_90e35bd2-b005-4bb0-a024-9e213e8ba6ab/operator/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.533041 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r55lw_5f00cd83-9e7b-4df9-a67e-3b33461aa39b/registry-server/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.620698 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-bf6d4f946-8lhqg_18c82783-5901-4ed0-ba58-23f5951cc7d1/manager/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.858672 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-9b6f8f78c-8bdgx_8ef4d6f9-7a4c-4f30-984c-0a96f344a539/manager/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.926141 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-77df58d67c-27bnt_05e4a6ac-9ee3-4726-8709-945b37705103/manager/0.log" Jan 06 09:50:40 crc kubenswrapper[4784]: I0106 09:50:40.972368 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-cqj6x_76b61982-bafd-4f7a-a473-f5c0ef78ee74/operator/0.log" Jan 06 09:50:41 crc kubenswrapper[4784]: I0106 09:50:41.113363 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bb586bbf4-rvf72_943574ea-c3fa-4541-8c22-d0b799c7497a/manager/0.log" Jan 06 09:50:41 crc kubenswrapper[4784]: I0106 09:50:41.200901 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-68d988df55-r6lj7_4e27981f-a624-4063-b5af-a6ee3fd1c535/manager/0.log" Jan 06 09:50:41 crc kubenswrapper[4784]: I0106 09:50:41.303880 4784 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-9dbdf6486-wk4xc_fd92de86-98c0-4c11-9895-7b78b2aef05b/manager/0.log" Jan 06 09:50:41 crc kubenswrapper[4784]: I0106 09:50:41.344633 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6c866cfdcb-qfc8h_184dc664-1b08-43fe-bc79-6b6bdcc2563f/manager/0.log" Jan 06 09:50:44 crc kubenswrapper[4784]: I0106 09:50:44.351055 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:50:44 crc kubenswrapper[4784]: I0106 09:50:44.353276 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.676528 4784 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:50:53 crc kubenswrapper[4784]: E0106 09:50:53.677754 4784 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41288985-0381-4f8a-b6e3-304b8f66cdf5" containerName="container-00" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.677777 4784 state_mem.go:107] "Deleted CPUSet assignment" podUID="41288985-0381-4f8a-b6e3-304b8f66cdf5" containerName="container-00" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.678098 4784 memory_manager.go:354] "RemoveStaleState removing state" podUID="41288985-0381-4f8a-b6e3-304b8f66cdf5" containerName="container-00" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.680266 4784 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.701145 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.776451 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.776499 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht8xp\" (UniqueName: \"kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.776529 4784 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.878088 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.878137 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht8xp\" (UniqueName: \"kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.878164 4784 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.878783 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.879128 4784 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:53 crc kubenswrapper[4784]: I0106 09:50:53.914687 4784 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ht8xp\" (UniqueName: \"kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp\") pod \"redhat-marketplace-llt8k\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:54 crc kubenswrapper[4784]: I0106 09:50:54.011254 4784 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:50:54 crc kubenswrapper[4784]: I0106 09:50:54.470972 4784 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:50:55 crc kubenswrapper[4784]: I0106 09:50:55.344321 4784 generic.go:334] "Generic (PLEG): container finished" podID="648bb37a-1a4b-44b0-8236-a2583f88a425" containerID="cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70" exitCode=0 Jan 06 09:50:55 crc kubenswrapper[4784]: I0106 09:50:55.344486 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerDied","Data":"cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70"} Jan 06 09:50:55 crc kubenswrapper[4784]: I0106 09:50:55.344708 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerStarted","Data":"64636888d5400f20a4b7df377d955752b0fd6f61a0de73a6c708a4bbf9240d13"} Jan 06 09:50:55 crc kubenswrapper[4784]: I0106 09:50:55.346177 4784 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 06 09:50:56 crc kubenswrapper[4784]: I0106 09:50:56.357439 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerStarted","Data":"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417"} Jan 06 09:50:56 crc kubenswrapper[4784]: E0106 09:50:56.559852 4784 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod648bb37a_1a4b_44b0_8236_a2583f88a425.slice/crio-7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417.scope\": RecentStats: unable to find data in memory cache]" Jan 06 09:50:57 crc kubenswrapper[4784]: I0106 09:50:57.367045 4784 generic.go:334] "Generic (PLEG): container finished" podID="648bb37a-1a4b-44b0-8236-a2583f88a425" containerID="7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417" exitCode=0 Jan 06 09:50:57 crc kubenswrapper[4784]: I0106 09:50:57.367167 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerDied","Data":"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417"} Jan 06 09:50:58 crc kubenswrapper[4784]: I0106 09:50:58.377203 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerStarted","Data":"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64"} Jan 06 09:50:58 crc kubenswrapper[4784]: I0106 09:50:58.399725 4784 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-llt8k" 
podStartSLOduration=2.915036563 podStartE2EDuration="5.399702025s" podCreationTimestamp="2026-01-06 09:50:53 +0000 UTC" firstStartedPulling="2026-01-06 09:50:55.345925372 +0000 UTC m=+5757.392098219" lastFinishedPulling="2026-01-06 09:50:57.830590804 +0000 UTC m=+5759.876763681" observedRunningTime="2026-01-06 09:50:58.397159226 +0000 UTC m=+5760.443332073" watchObservedRunningTime="2026-01-06 09:50:58.399702025 +0000 UTC m=+5760.445874862" Jan 06 09:51:02 crc kubenswrapper[4784]: I0106 09:51:02.528249 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-qpchx_d9a30f78-3dc1-4289-9560-c385125df7de/control-plane-machine-set-operator/0.log" Jan 06 09:51:02 crc kubenswrapper[4784]: I0106 09:51:02.644797 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5q85_c0033eba-a627-460c-b782-04628acbadcf/kube-rbac-proxy/0.log" Jan 06 09:51:02 crc kubenswrapper[4784]: I0106 09:51:02.675953 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5q85_c0033eba-a627-460c-b782-04628acbadcf/machine-api-operator/0.log" Jan 06 09:51:04 crc kubenswrapper[4784]: I0106 09:51:04.012064 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:04 crc kubenswrapper[4784]: I0106 09:51:04.012145 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:04 crc kubenswrapper[4784]: I0106 09:51:04.059790 4784 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:04 crc kubenswrapper[4784]: I0106 09:51:04.490916 4784 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:04 crc kubenswrapper[4784]: I0106 09:51:04.552869 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:51:06 crc kubenswrapper[4784]: I0106 09:51:06.449075 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-llt8k" podUID="648bb37a-1a4b-44b0-8236-a2583f88a425" containerName="registry-server" containerID="cri-o://64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64" gracePeriod=2 Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.052117 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.216103 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content\") pod \"648bb37a-1a4b-44b0-8236-a2583f88a425\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.216161 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ht8xp\" (UniqueName: \"kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp\") pod \"648bb37a-1a4b-44b0-8236-a2583f88a425\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.216260 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities\") pod \"648bb37a-1a4b-44b0-8236-a2583f88a425\" (UID: \"648bb37a-1a4b-44b0-8236-a2583f88a425\") " Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.217736 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities" (OuterVolumeSpecName: "utilities") pod "648bb37a-1a4b-44b0-8236-a2583f88a425" (UID: "648bb37a-1a4b-44b0-8236-a2583f88a425"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.222687 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp" (OuterVolumeSpecName: "kube-api-access-ht8xp") pod "648bb37a-1a4b-44b0-8236-a2583f88a425" (UID: "648bb37a-1a4b-44b0-8236-a2583f88a425"). InnerVolumeSpecName "kube-api-access-ht8xp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.238984 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "648bb37a-1a4b-44b0-8236-a2583f88a425" (UID: "648bb37a-1a4b-44b0-8236-a2583f88a425"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.318258 4784 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.318296 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ht8xp\" (UniqueName: \"kubernetes.io/projected/648bb37a-1a4b-44b0-8236-a2583f88a425-kube-api-access-ht8xp\") on node \"crc\" DevicePath \"\"" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.318310 4784 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/648bb37a-1a4b-44b0-8236-a2583f88a425-utilities\") on node \"crc\" DevicePath \"\"" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.464096 4784 generic.go:334] "Generic (PLEG): container finished" podID="648bb37a-1a4b-44b0-8236-a2583f88a425" containerID="64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64" exitCode=0 Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.464295 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerDied","Data":"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64"} Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.464422 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-llt8k" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.464616 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-llt8k" event={"ID":"648bb37a-1a4b-44b0-8236-a2583f88a425","Type":"ContainerDied","Data":"64636888d5400f20a4b7df377d955752b0fd6f61a0de73a6c708a4bbf9240d13"} Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.464653 4784 scope.go:117] "RemoveContainer" containerID="64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.498348 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.502787 4784 scope.go:117] "RemoveContainer" containerID="7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.506438 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-llt8k"] Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.525942 4784 scope.go:117] "RemoveContainer" containerID="cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.559653 4784 scope.go:117] "RemoveContainer" containerID="64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64" Jan 06 09:51:07 crc kubenswrapper[4784]: E0106 09:51:07.560092 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64\": container with ID starting with 64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64 not found: ID does not exist" containerID="64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.560132 4784 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64"} err="failed to get container status \"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64\": rpc error: code = NotFound desc = could not find container \"64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64\": container with ID starting with 64d0a18d24e2a956b486dd075af0b0b39cbb4375df36ce3067caf5dd91dc3c64 not found: ID does not exist" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.560160 4784 scope.go:117] "RemoveContainer" containerID="7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417" Jan 06 09:51:07 crc kubenswrapper[4784]: E0106 09:51:07.560474 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417\": container with ID starting with 7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417 not found: ID does not exist" containerID="7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.560510 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417"} err="failed to get container status \"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417\": rpc error: code = NotFound desc = could not find container \"7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417\": container with ID starting with 7679ee83dcb37e4846a9631863ed95b6b009954fed636d5b4e05214bacc1e417 not found: ID does not exist" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.560534 4784 scope.go:117] "RemoveContainer" containerID="cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70" Jan 06 09:51:07 crc kubenswrapper[4784]: E0106 09:51:07.560879 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70\": container with ID starting with cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70 not found: ID does not exist" containerID="cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70" Jan 06 09:51:07 crc kubenswrapper[4784]: I0106 09:51:07.560912 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70"} err="failed to get container status \"cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70\": rpc error: code = NotFound desc = could not find container \"cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70\": container with ID starting with cbb59edfd69a425bb78cda1d72ff5a94d1969eb72a9a5400e528dcea2d4acb70 not found: ID does not exist" Jan 06 09:51:08 crc kubenswrapper[4784]: I0106 09:51:08.330275 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="648bb37a-1a4b-44b0-8236-a2583f88a425" path="/var/lib/kubelet/pods/648bb37a-1a4b-44b0-8236-a2583f88a425/volumes" Jan 06 09:51:14 crc kubenswrapper[4784]: I0106 09:51:14.351448 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:51:14 crc kubenswrapper[4784]: I0106 09:51:14.352191 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:51:17 crc kubenswrapper[4784]: I0106 09:51:17.323405 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-rl2h7_c6e05974-22af-4307-83d0-4707be8f8694/cert-manager-controller/0.log" Jan 06 09:51:17 crc kubenswrapper[4784]: I0106 09:51:17.657288 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-57bnw_bd7d1a44-da72-4cb3-bbb9-b71e33c366dc/cert-manager-cainjector/0.log" Jan 06 09:51:17 crc kubenswrapper[4784]: I0106 09:51:17.694574 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-bzzrv_958c414a-2141-4223-bf2d-f4ba1a83194e/cert-manager-webhook/0.log" Jan 06 09:51:18 crc kubenswrapper[4784]: I0106 09:51:18.812065 4784 scope.go:117] "RemoveContainer" containerID="5bb82a00eda52a8be32fae091f328c58983d956b96488dc5dedb46493f5a515a" Jan 06 09:51:18 crc kubenswrapper[4784]: I0106 09:51:18.844370 4784 scope.go:117] "RemoveContainer" containerID="6cb1d09301f5dfb7866d4c7a5525b27aeab64395c8bf2d1982b1623985c06182" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.120727 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-9zrkp_e2e0c600-184b-4cc7-8b1f-ed0d37f7d141/nmstate-console-plugin/0.log" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.290929 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-rh8g6_10985894-27f3-4621-ad1a-40ac399731e4/nmstate-handler/0.log" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.292435 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-tjmr9_f0efdd39-6cba-42a7-9222-9084987431a7/kube-rbac-proxy/0.log" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.388334 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-tjmr9_f0efdd39-6cba-42a7-9222-9084987431a7/nmstate-metrics/0.log" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.492476 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-p54lp_dcdd9a4f-b0d0-4fc3-8d24-1bb09a29fcaa/nmstate-operator/0.log" Jan 06 09:51:32 crc kubenswrapper[4784]: I0106 09:51:32.571579 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-m9sxj_d6b7564f-4ee7-4b23-93b1-8252ba326f2e/nmstate-webhook/0.log" Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.351794 4784 patch_prober.go:28] interesting pod/machine-config-daemon-68nth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.352437 4784 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-68nth" 
podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.352495 4784 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-68nth" Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.353529 4784 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8"} pod="openshift-machine-config-operator/machine-config-daemon-68nth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.353643 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerName="machine-config-daemon" containerID="cri-o://0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" gracePeriod=600 Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.786264 4784 generic.go:334] "Generic (PLEG): container finished" podID="94b52312-7b54-4df2-ab82-0eb7b01334f7" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" exitCode=0 Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.786316 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerDied","Data":"0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8"} Jan 06 09:51:44 crc kubenswrapper[4784]: I0106 09:51:44.786358 4784 scope.go:117] "RemoveContainer" containerID="72e2ee1df1508c45b4af0f614c4678cd408cada5da9dc40a3583054dd2332a7e" Jan 06 09:51:44 crc kubenswrapper[4784]: E0106 09:51:44.981289 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:51:45 crc kubenswrapper[4784]: I0106 09:51:45.795501 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:51:45 crc kubenswrapper[4784]: E0106 09:51:45.795755 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:51:47 crc kubenswrapper[4784]: I0106 09:51:47.802983 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-crpzt_7ec719ea-f249-4012-bd61-a87a31829e9c/kube-rbac-proxy/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.018698 4784 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-frr-files/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.119315 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-crpzt_7ec719ea-f249-4012-bd61-a87a31829e9c/controller/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.219672 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-frr-files/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.245914 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-metrics/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.257235 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-reloader/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.317572 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-reloader/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.509663 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-frr-files/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.548658 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-reloader/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.551033 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-metrics/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.552821 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-metrics/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.729940 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-metrics/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.737078 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-reloader/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.759732 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/cp-frr-files/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.791528 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/controller/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.905270 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/frr-metrics/0.log" Jan 06 09:51:48 crc kubenswrapper[4784]: I0106 09:51:48.921412 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/kube-rbac-proxy/0.log" Jan 06 09:51:49 crc kubenswrapper[4784]: I0106 09:51:49.006249 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/kube-rbac-proxy-frr/0.log" Jan 06 09:51:49 crc 
kubenswrapper[4784]: I0106 09:51:49.131071 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/reloader/0.log" Jan 06 09:51:49 crc kubenswrapper[4784]: I0106 09:51:49.220349 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-sw629_5cabc455-2ed6-4b62-aba3-d7cdff292e99/frr-k8s-webhook-server/0.log" Jan 06 09:51:49 crc kubenswrapper[4784]: I0106 09:51:49.392645 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-664557b5f7-d75n6_fb0ad91d-c7ef-4b01-87da-5dca6d1ef3ea/manager/0.log" Jan 06 09:51:49 crc kubenswrapper[4784]: I0106 09:51:49.535626 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5ff8cf5854-j76qf_a53a2791-cbe2-49ba-838a-9c79c130186f/webhook-server/0.log" Jan 06 09:51:49 crc kubenswrapper[4784]: I0106 09:51:49.603667 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg6rr_3a8e306b-e5ee-4ced-b7be-f0a26248db92/kube-rbac-proxy/0.log" Jan 06 09:51:50 crc kubenswrapper[4784]: I0106 09:51:50.188212 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg6rr_3a8e306b-e5ee-4ced-b7be-f0a26248db92/speaker/0.log" Jan 06 09:51:50 crc kubenswrapper[4784]: I0106 09:51:50.643923 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tcrn4_310dd289-ef3f-4c2b-87bf-eec891361a6e/frr/0.log" Jan 06 09:52:00 crc kubenswrapper[4784]: I0106 09:52:00.312771 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:52:00 crc kubenswrapper[4784]: E0106 09:52:00.313803 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:52:04 crc kubenswrapper[4784]: I0106 09:52:04.760460 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/util/0.log" Jan 06 09:52:04 crc kubenswrapper[4784]: I0106 09:52:04.951374 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/util/0.log" Jan 06 09:52:04 crc kubenswrapper[4784]: I0106 09:52:04.978152 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.033399 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.161367 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/util/0.log" Jan 06 
09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.191676 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.193938 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aj4zkl_6acbaac6-e54d-44c6-a0a3-cafa748daa9a/extract/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.334427 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/util/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.504747 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.519329 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.532814 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/util/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.706535 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/util/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.715069 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/pull/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.733085 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4m9bx2_2f835431-ce80-49f1-a8b5-6fc6319cfe13/extract/0.log" Jan 06 09:52:05 crc kubenswrapper[4784]: I0106 09:52:05.863732 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/util/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.109246 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/pull/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.115681 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/util/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.116582 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/pull/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.289708 4784 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/util/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.297964 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/extract/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.329089 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8s8grx_4f1df559-23aa-4dbe-859b-3404209af722/pull/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.429351 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-utilities/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.616207 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-utilities/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.646793 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-content/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.665415 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-content/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.846856 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-content/0.log" Jan 06 09:52:06 crc kubenswrapper[4784]: I0106 09:52:06.907807 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/extract-utilities/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.093529 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-utilities/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.300006 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-utilities/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.369886 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-content/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.425839 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-content/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.464771 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-c9kbb_52672fc6-6a1b-4b75-aca8-c7208aabcfe8/registry-server/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.622019 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-content/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: 
I0106 09:52:07.713737 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/extract-utilities/0.log" Jan 06 09:52:07 crc kubenswrapper[4784]: I0106 09:52:07.886444 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-mq9b8_51cd7ec7-c900-4f63-bcee-5f0f9e215e69/marketplace-operator/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.023800 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-utilities/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.230648 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-utilities/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.251089 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-content/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.297659 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-content/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.360395 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4qddw_bab3a775-7540-4d7a-8ec1-c954a0f0fd08/registry-server/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.585892 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-utilities/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.655706 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-utilities/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.681708 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/extract-content/0.log" Jan 06 09:52:08 crc kubenswrapper[4784]: I0106 09:52:08.740407 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-282gk_3467c4e4-9bca-403c-b618-cd6db316a863/registry-server/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.012793 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-utilities/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.057805 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-content/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.077361 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-content/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.240054 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-content/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.242876 
4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/extract-utilities/0.log" Jan 06 09:52:09 crc kubenswrapper[4784]: I0106 09:52:09.996336 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-btpfr_459288c9-529b-4c96-8547-522e2e07cbb9/registry-server/0.log" Jan 06 09:52:15 crc kubenswrapper[4784]: I0106 09:52:15.313094 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:52:15 crc kubenswrapper[4784]: E0106 09:52:15.313701 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:52:18 crc kubenswrapper[4784]: I0106 09:52:18.955384 4784 scope.go:117] "RemoveContainer" containerID="9f53d41879fc8c8d25e30863005e67a16ff9acc68b3639929c3482bb4c962a34" Jan 06 09:52:30 crc kubenswrapper[4784]: I0106 09:52:30.337044 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:52:30 crc kubenswrapper[4784]: E0106 09:52:30.337952 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:52:43 crc kubenswrapper[4784]: I0106 09:52:43.312039 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:52:43 crc kubenswrapper[4784]: E0106 09:52:43.312753 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:52:57 crc kubenswrapper[4784]: I0106 09:52:57.312870 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:52:57 crc kubenswrapper[4784]: E0106 09:52:57.313585 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:53:10 crc kubenswrapper[4784]: I0106 09:53:10.313645 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:53:10 crc kubenswrapper[4784]: E0106 09:53:10.315056 4784 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:53:25 crc kubenswrapper[4784]: I0106 09:53:25.313033 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:53:25 crc kubenswrapper[4784]: E0106 09:53:25.313682 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:53:32 crc kubenswrapper[4784]: I0106 09:53:32.794090 4784 generic.go:334] "Generic (PLEG): container finished" podID="c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" containerID="0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292" exitCode=0 Jan 06 09:53:32 crc kubenswrapper[4784]: I0106 09:53:32.794293 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xfxs6/must-gather-fk7md" event={"ID":"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d","Type":"ContainerDied","Data":"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292"} Jan 06 09:53:32 crc kubenswrapper[4784]: I0106 09:53:32.795695 4784 scope.go:117] "RemoveContainer" containerID="0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292" Jan 06 09:53:33 crc kubenswrapper[4784]: I0106 09:53:33.193002 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xfxs6_must-gather-fk7md_c9895028-22be-4f7e-b2a4-df8f3ff3fd4d/gather/0.log" Jan 06 09:53:39 crc kubenswrapper[4784]: I0106 09:53:39.313105 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:53:39 crc kubenswrapper[4784]: E0106 09:53:39.314377 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.190855 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xfxs6/must-gather-fk7md"] Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.191374 4784 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-xfxs6/must-gather-fk7md" podUID="c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" containerName="copy" containerID="cri-o://018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57" gracePeriod=2 Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.198721 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xfxs6/must-gather-fk7md"] Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.615460 4784 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-xfxs6_must-gather-fk7md_c9895028-22be-4f7e-b2a4-df8f3ff3fd4d/copy/0.log" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.616254 4784 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.669126 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output\") pod \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.669636 4784 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6hwp\" (UniqueName: \"kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp\") pod \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\" (UID: \"c9895028-22be-4f7e-b2a4-df8f3ff3fd4d\") " Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.676259 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp" (OuterVolumeSpecName: "kube-api-access-k6hwp") pod "c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" (UID: "c9895028-22be-4f7e-b2a4-df8f3ff3fd4d"). InnerVolumeSpecName "kube-api-access-k6hwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.773304 4784 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6hwp\" (UniqueName: \"kubernetes.io/projected/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-kube-api-access-k6hwp\") on node \"crc\" DevicePath \"\"" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.788881 4784 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" (UID: "c9895028-22be-4f7e-b2a4-df8f3ff3fd4d"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.875346 4784 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.880023 4784 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xfxs6_must-gather-fk7md_c9895028-22be-4f7e-b2a4-df8f3ff3fd4d/copy/0.log" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.880444 4784 generic.go:334] "Generic (PLEG): container finished" podID="c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" containerID="018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57" exitCode=143 Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.880522 4784 scope.go:117] "RemoveContainer" containerID="018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.880642 4784 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xfxs6/must-gather-fk7md" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.901124 4784 scope.go:117] "RemoveContainer" containerID="0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.979153 4784 scope.go:117] "RemoveContainer" containerID="018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57" Jan 06 09:53:40 crc kubenswrapper[4784]: E0106 09:53:40.979664 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57\": container with ID starting with 018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57 not found: ID does not exist" containerID="018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.979710 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57"} err="failed to get container status \"018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57\": rpc error: code = NotFound desc = could not find container \"018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57\": container with ID starting with 018f56bec2edd0f67fcce7419ea6fccfc4a3603858903bdf37b98ff987360a57 not found: ID does not exist" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.979733 4784 scope.go:117] "RemoveContainer" containerID="0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292" Jan 06 09:53:40 crc kubenswrapper[4784]: E0106 09:53:40.980223 4784 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292\": container with ID starting with 0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292 not found: ID does not exist" containerID="0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292" Jan 06 09:53:40 crc kubenswrapper[4784]: I0106 09:53:40.980265 4784 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292"} err="failed to get container status \"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292\": rpc error: code = NotFound desc = could not find container \"0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292\": container with ID starting with 0d7d545bd205026d88fdf2337829424f8546b541129ecd3149ca2df9dc9bc292 not found: ID does not exist" Jan 06 09:53:42 crc kubenswrapper[4784]: I0106 09:53:42.328447 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9895028-22be-4f7e-b2a4-df8f3ff3fd4d" path="/var/lib/kubelet/pods/c9895028-22be-4f7e-b2a4-df8f3ff3fd4d/volumes" Jan 06 09:53:51 crc kubenswrapper[4784]: I0106 09:53:51.313725 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:53:51 crc kubenswrapper[4784]: E0106 09:53:51.315103 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:54:05 crc kubenswrapper[4784]: I0106 09:54:05.312377 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:54:05 crc kubenswrapper[4784]: E0106 09:54:05.314148 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:54:16 crc kubenswrapper[4784]: I0106 09:54:16.312980 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:54:16 crc kubenswrapper[4784]: E0106 09:54:16.314162 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:54:27 crc kubenswrapper[4784]: I0106 09:54:27.312916 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:54:27 crc kubenswrapper[4784]: E0106 09:54:27.313916 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:54:42 crc kubenswrapper[4784]: I0106 09:54:42.313107 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:54:42 crc kubenswrapper[4784]: E0106 09:54:42.314285 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:54:57 crc kubenswrapper[4784]: I0106 09:54:57.313252 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:54:57 crc kubenswrapper[4784]: E0106 09:54:57.314287 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:55:09 crc kubenswrapper[4784]: I0106 09:55:09.313208 4784 
scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:55:09 crc kubenswrapper[4784]: E0106 09:55:09.314324 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:55:20 crc kubenswrapper[4784]: I0106 09:55:20.313113 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:55:20 crc kubenswrapper[4784]: E0106 09:55:20.329905 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:55:32 crc kubenswrapper[4784]: I0106 09:55:32.313189 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:55:32 crc kubenswrapper[4784]: E0106 09:55:32.314222 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.066690 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-b9e3-account-create-update-fc6gj"] Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.078538 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-b9e3-account-create-update-fc6gj"] Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.085594 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-9hlnc"] Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.092303 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-9hlnc"] Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.321614 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaf0679d-d8f7-4e6b-bef2-23b9337d691d" path="/var/lib/kubelet/pods/aaf0679d-d8f7-4e6b-bef2-23b9337d691d/volumes" Jan 06 09:55:44 crc kubenswrapper[4784]: I0106 09:55:44.322257 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1a0eaf0-8041-4652-9c05-ab5208875034" path="/var/lib/kubelet/pods/f1a0eaf0-8041-4652-9c05-ab5208875034/volumes" Jan 06 09:55:45 crc kubenswrapper[4784]: I0106 09:55:45.312931 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:55:45 crc kubenswrapper[4784]: E0106 09:55:45.313463 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:55:50 crc kubenswrapper[4784]: I0106 09:55:50.033943 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-p8sgr"] Jan 06 09:55:50 crc kubenswrapper[4784]: I0106 09:55:50.041225 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-p8sgr"] Jan 06 09:55:50 crc kubenswrapper[4784]: I0106 09:55:50.329836 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcb83c22-8916-4c1c-976f-51c157f9b7db" path="/var/lib/kubelet/pods/bcb83c22-8916-4c1c-976f-51c157f9b7db/volumes" Jan 06 09:55:58 crc kubenswrapper[4784]: I0106 09:55:58.317873 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:55:58 crc kubenswrapper[4784]: E0106 09:55:58.318472 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:56:04 crc kubenswrapper[4784]: I0106 09:56:04.040653 4784 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-pcbzh"] Jan 06 09:56:04 crc kubenswrapper[4784]: I0106 09:56:04.046698 4784 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-pcbzh"] Jan 06 09:56:04 crc kubenswrapper[4784]: I0106 09:56:04.322228 4784 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6620201-92ef-41a1-bab1-d72b4bb416b3" path="/var/lib/kubelet/pods/b6620201-92ef-41a1-bab1-d72b4bb416b3/volumes" Jan 06 09:56:10 crc kubenswrapper[4784]: I0106 09:56:10.313314 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:56:10 crc kubenswrapper[4784]: E0106 09:56:10.314137 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:56:19 crc kubenswrapper[4784]: I0106 09:56:19.114203 4784 scope.go:117] "RemoveContainer" containerID="2b1082795b11fded9cfcdea78de92e9ee1106ca386429c47d696842252125f8e" Jan 06 09:56:19 crc kubenswrapper[4784]: I0106 09:56:19.140711 4784 scope.go:117] "RemoveContainer" containerID="b2346999dd29189988a2ad886abf3f4d3c93e9133994cf6dff37a7341c04a39c" Jan 06 09:56:19 crc kubenswrapper[4784]: I0106 09:56:19.213524 4784 scope.go:117] "RemoveContainer" containerID="3df8a17e3455c7d1560bcf7c1ed5c662c4b9e10992f786c1f5bd2bf2a1993fcd" Jan 06 09:56:19 crc kubenswrapper[4784]: I0106 09:56:19.249986 4784 scope.go:117] "RemoveContainer" containerID="a03a331c335015894d1221ef9a5a866b612f27ef8c20f30d3958af0f199feea5" Jan 06 09:56:19 crc kubenswrapper[4784]: I0106 09:56:19.299440 4784 scope.go:117] "RemoveContainer" 
containerID="bc3dc6c600a0892fc00a16865478bc0e3cf6985b46366b66395c37458344afca" Jan 06 09:56:22 crc kubenswrapper[4784]: I0106 09:56:22.311857 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:56:22 crc kubenswrapper[4784]: E0106 09:56:22.312409 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:56:37 crc kubenswrapper[4784]: I0106 09:56:37.312129 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:56:37 crc kubenswrapper[4784]: E0106 09:56:37.312856 4784 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-68nth_openshift-machine-config-operator(94b52312-7b54-4df2-ab82-0eb7b01334f7)\"" pod="openshift-machine-config-operator/machine-config-daemon-68nth" podUID="94b52312-7b54-4df2-ab82-0eb7b01334f7" Jan 06 09:56:51 crc kubenswrapper[4784]: I0106 09:56:51.312929 4784 scope.go:117] "RemoveContainer" containerID="0a28120516f43c5c8a0153c42c4997c118bbbd3e422306dbab556310d4f01ee8" Jan 06 09:56:52 crc kubenswrapper[4784]: I0106 09:56:52.775366 4784 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-68nth" event={"ID":"94b52312-7b54-4df2-ab82-0eb7b01334f7","Type":"ContainerStarted","Data":"02b3eea5f01f01af0bcb4b8e4f52cd06f14f9ae726bfaa6b932bfb2d8b4b06de"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515127156163024454 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015127156163017371 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015127141654016513 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015127141654015463 5ustar corecore